Verifier.cpp
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verification;
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
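For orientation, the checks listed above are reached through the public entry points declared in llvm/IR/Verifier.h (verifyFunction and verifyModule). A minimal caller-side sketch, assuming only that public API; the helper name checkModule is illustrative:

#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

// Run the verifier over a whole module, printing diagnostics to stderr.
// verifyModule() returns true when the module is broken.
static bool checkModule(const llvm::Module &M) {
  bool BrokenDebugInfo = false;
  bool Broken = llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo);
  if (BrokenDebugInfo)
    llvm::errs() << "note: malformed debug info (recoverable by stripping)\n";
  return Broken;
}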
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
57#include "llvm/ADT/STLExtras.h"
59#include "llvm/ADT/SmallSet.h"
62#include "llvm/ADT/StringMap.h"
63#include "llvm/ADT/StringRef.h"
64#include "llvm/ADT/Twine.h"
66#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CFG.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Comdat.h"
73#include "llvm/IR/Constant.h"
75#include "llvm/IR/Constants.h"
77#include "llvm/IR/DataLayout.h"
78#include "llvm/IR/DebugInfo.h"
80#include "llvm/IR/DebugLoc.h"
82#include "llvm/IR/Dominators.h"
84#include "llvm/IR/Function.h"
85#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
102#include "llvm/IR/Metadata.h"
103#include "llvm/IR/Module.h"
105#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
115#include "llvm/Support/Casting.h"
120#include <algorithm>
121#include <cassert>
122#include <cstdint>
123#include <memory>
124#include <optional>
125#include <string>
126#include <utility>
127
128using namespace llvm;
129
131 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
132 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
133 "scopes are not dominating"));
134
135namespace llvm {
136
137struct VerifierSupport {
138 raw_ostream *OS;
139 const Module &M;
140 ModuleSlotTracker MST;
141 Triple TT;
142 const DataLayout &DL;
143 LLVMContext &Context;
144
145 /// Track the brokenness of the module while recursively visiting.
146 bool Broken = false;
147 /// Broken debug info can be "recovered" from by stripping the debug info.
148 bool BrokenDebugInfo = false;
149 /// Whether to treat broken debug info as an error.
150 bool TreatBrokenDebugInfoAsError = true;
151
152 explicit VerifierSupport(raw_ostream *OS, const Module &M)
153 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
154 Context(M.getContext()) {}
155
156private:
157 void Write(const Module *M) {
158 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
159 }
160
161 void Write(const Value *V) {
162 if (V)
163 Write(*V);
164 }
165
166 void Write(const Value &V) {
167 if (isa<Instruction>(V)) {
168 V.print(*OS, MST);
169 *OS << '\n';
170 } else {
171 V.printAsOperand(*OS, true, MST);
172 *OS << '\n';
173 }
174 }
175
176 void Write(const DbgRecord *DR) {
177 if (DR) {
178 DR->print(*OS, MST, false);
179 *OS << '\n';
180 }
181 }
182
183 void Write(DbgVariableRecord::LocationType Type) {
184 switch (Type) {
185 case DbgVariableRecord::LocationType::Value:
186 *OS << "value";
187 break;
188 case DbgVariableRecord::LocationType::Declare:
189 *OS << "declare";
190 break;
191 case DbgVariableRecord::LocationType::Assign:
192 *OS << "assign";
193 break;
194 case DbgVariableRecord::LocationType::End:
195 *OS << "end";
196 break;
197 case DbgVariableRecord::LocationType::Any:
198 *OS << "any";
199 break;
200 };
201 }
202
203 void Write(const Metadata *MD) {
204 if (!MD)
205 return;
206 MD->print(*OS, MST, &M);
207 *OS << '\n';
208 }
209
210 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
211 Write(MD.get());
212 }
213
214 void Write(const NamedMDNode *NMD) {
215 if (!NMD)
216 return;
217 NMD->print(*OS, MST);
218 *OS << '\n';
219 }
220
221 void Write(Type *T) {
222 if (!T)
223 return;
224 *OS << ' ' << *T;
225 }
226
227 void Write(const Comdat *C) {
228 if (!C)
229 return;
230 *OS << *C;
231 }
232
233 void Write(const APInt *AI) {
234 if (!AI)
235 return;
236 *OS << *AI << '\n';
237 }
238
239 void Write(const unsigned i) { *OS << i << '\n'; }
240
241 // NOLINTNEXTLINE(readability-identifier-naming)
242 void Write(const Attribute *A) {
243 if (!A)
244 return;
245 *OS << A->getAsString() << '\n';
246 }
247
248 // NOLINTNEXTLINE(readability-identifier-naming)
249 void Write(const AttributeSet *AS) {
250 if (!AS)
251 return;
252 *OS << AS->getAsString() << '\n';
253 }
254
255 // NOLINTNEXTLINE(readability-identifier-naming)
256 void Write(const AttributeList *AL) {
257 if (!AL)
258 return;
259 AL->print(*OS);
260 }
261
262 void Write(Printable P) { *OS << P << '\n'; }
263
264 template <typename T> void Write(ArrayRef<T> Vs) {
265 for (const T &V : Vs)
266 Write(V);
267 }
268
269 template <typename T1, typename... Ts>
270 void WriteTs(const T1 &V1, const Ts &... Vs) {
271 Write(V1);
272 WriteTs(Vs...);
273 }
274
275 template <typename... Ts> void WriteTs() {}
276
277public:
278 /// A check failed, so print out the condition and the message.
279 ///
280 /// This provides a nice place to put a breakpoint if you want to see why
281 /// something is not correct.
282 void CheckFailed(const Twine &Message) {
283 if (OS)
284 *OS << Message << '\n';
285 Broken = true;
286 }
287
288 /// A check failed (with values to print).
289 ///
290 /// This calls the Message-only version so that the above is easier to set a
291 /// breakpoint on.
292 template <typename T1, typename... Ts>
293 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
294 CheckFailed(Message);
295 if (OS)
296 WriteTs(V1, Vs...);
297 }
298
299 /// A debug info check failed.
300 void DebugInfoCheckFailed(const Twine &Message) {
301 if (OS)
302 *OS << Message << '\n';
303 Broken |= TreatBrokenDebugInfoAsError;
304 BrokenDebugInfo = true;
305 }
306
307 /// A debug info check failed (with values to print).
308 template <typename T1, typename... Ts>
309 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
310 const Ts &... Vs) {
311 DebugInfoCheckFailed(Message);
312 if (OS)
313 WriteTs(V1, Vs...);
314 }
315};
316
317} // namespace llvm
318
319namespace {
320
321class Verifier : public InstVisitor<Verifier>, VerifierSupport {
322 friend class InstVisitor<Verifier>;
323
324 // ISD::ArgFlagsTy::MemAlign only has 4 bits for the alignment, so
325 // the alignment value should not exceed 2^15. Since encode(Align)
326 // adds 1 to the shift amount, the alignment value should not
327 // exceed 2^14; otherwise it cannot be lowered properly in the
328 // backend.
329 static constexpr unsigned ParamMaxAlignment = 1 << 14;
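 // Illustration (not in the original source): encode(Align(1 << 14)) stores
 // Log2(1 << 14) + 1 = 15, the largest value that fits in MemAlign's 4 bits;
 // an alignment of 1 << 15 would encode to 16 and could not be represented.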
330 DominatorTree DT;
331
332 /// When verifying a basic block, keep track of all of the
333 /// instructions we have seen so far.
334 ///
335 /// This allows us to do efficient dominance checks for the case when an
336 /// instruction has an operand that is an instruction in the same block.
337 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
338
339 /// Keep track of the metadata nodes that have been checked already.
340 SmallPtrSet<const Metadata *, 32> MDNodes;
341
342 /// Keep track which DISubprogram is attached to which function.
344
345 /// Track all DICompileUnits visited.
347
348 /// The result type for a landingpad.
349 Type *LandingPadResultTy;
350
351 /// Whether we've seen a call to @llvm.localescape in this function
352 /// already.
353 bool SawFrameEscape;
354
355 /// Whether the current function has a DISubprogram attached to it.
356 bool HasDebugInfo = false;
357
358 /// The current source language.
359 dwarf::SourceLanguage CurrentSourceLang = dwarf::DW_LANG_lo_user;
360
361 /// Stores the count of how many objects were passed to llvm.localescape for a
362 /// given function and the largest index passed to llvm.localrecover.
364
365 // Maps catchswitches and cleanuppads that unwind to siblings to the
366 // terminators that indicate the unwind, used to detect cycles therein.
368
369 /// Cache which blocks are in which funclet, if an EH funclet personality is
370 /// in use. Otherwise empty.
371 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
372
373 /// Cache of constants visited in search of ConstantExprs.
374 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
375
376 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
377 SmallVector<const Function *, 4> DeoptimizeDeclarations;
378
379 /// Cache of attribute lists verified.
380 SmallPtrSet<const void *, 32> AttributeListsVisited;
381
382 // Verify that this GlobalValue is only used in this module.
383 // This map is used to avoid visiting uses twice. We can arrive at a user
384 // twice, if they have multiple operands. In particular for very large
385 // constant expressions, we can arrive at a particular user many times.
386 SmallPtrSet<const Value *, 32> GlobalValueVisited;
387
388 // Keeps track of duplicate function argument debug info.
390
391 TBAAVerifier TBAAVerifyHelper;
392 ConvergenceVerifier ConvergenceVerifyHelper;
393
394 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
395
396 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
397
398public:
399 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
400 const Module &M)
401 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
402 SawFrameEscape(false), TBAAVerifyHelper(this) {
403 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
404 }
405
406 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
407
408 bool verify(const Function &F) {
409 assert(F.getParent() == &M &&
410 "An instance of this class only works with a specific module!");
411
412 // First ensure the function is well-enough formed to compute dominance
413 // information, and directly compute a dominance tree. We don't rely on the
414 // pass manager to provide this as it isolates us from a potentially
415 // out-of-date dominator tree and makes it significantly more complex to run
416 // this code outside of a pass manager.
417 // FIXME: It's really gross that we have to cast away constness here.
418 if (!F.empty())
419 DT.recalculate(const_cast<Function &>(F));
420
421 for (const BasicBlock &BB : F) {
422 if (!BB.empty() && BB.back().isTerminator())
423 continue;
424
425 if (OS) {
426 *OS << "Basic Block in function '" << F.getName()
427 << "' does not have terminator!\n";
428 BB.printAsOperand(*OS, true, MST);
429 *OS << "\n";
430 }
431 return false;
432 }
433
434 auto FailureCB = [this](const Twine &Message) {
435 this->CheckFailed(Message);
436 };
437 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
438
439 Broken = false;
440 // FIXME: We strip const here because the inst visitor strips const.
441 visit(const_cast<Function &>(F));
442 verifySiblingFuncletUnwinds();
443
444 if (ConvergenceVerifyHelper.sawTokens())
445 ConvergenceVerifyHelper.verify(DT);
446
447 InstsInThisBlock.clear();
448 DebugFnArgs.clear();
449 LandingPadResultTy = nullptr;
450 SawFrameEscape = false;
451 SiblingFuncletInfo.clear();
452 verifyNoAliasScopeDecl();
453 NoAliasScopeDecls.clear();
454
455 return !Broken;
456 }
457
458 /// Verify the module that this instance of \c Verifier was initialized with.
459 bool verify() {
460 Broken = false;
461
462 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
463 for (const Function &F : M)
464 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
465 DeoptimizeDeclarations.push_back(&F);
466
467 // Now that we've visited every function, verify that we never asked to
468 // recover a frame index that wasn't escaped.
469 verifyFrameRecoverIndices();
470 for (const GlobalVariable &GV : M.globals())
471 visitGlobalVariable(GV);
472
473 for (const GlobalAlias &GA : M.aliases())
474 visitGlobalAlias(GA);
475
476 for (const GlobalIFunc &GI : M.ifuncs())
477 visitGlobalIFunc(GI);
478
479 for (const NamedMDNode &NMD : M.named_metadata())
480 visitNamedMDNode(NMD);
481
482 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
483 visitComdat(SMEC.getValue());
484
485 visitModuleFlags();
486 visitModuleIdents();
487 visitModuleCommandLines();
488
489 verifyCompileUnits();
490
491 verifyDeoptimizeCallingConvs();
492 DISubprogramAttachments.clear();
493 return !Broken;
494 }
495
496private:
497 /// Whether a metadata node is allowed to be, or contain, a DILocation.
498 enum class AreDebugLocsAllowed { No, Yes };
499
500 // Verification methods...
501 void visitGlobalValue(const GlobalValue &GV);
502 void visitGlobalVariable(const GlobalVariable &GV);
503 void visitGlobalAlias(const GlobalAlias &GA);
504 void visitGlobalIFunc(const GlobalIFunc &GI);
505 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
506 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
507 const GlobalAlias &A, const Constant &C);
508 void visitNamedMDNode(const NamedMDNode &NMD);
509 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
510 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
511 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
512 void visitDIArgList(const DIArgList &AL, Function *F);
513 void visitComdat(const Comdat &C);
514 void visitModuleIdents();
515 void visitModuleCommandLines();
516 void visitModuleFlags();
517 void visitModuleFlag(const MDNode *Op,
518 DenseMap<const MDString *, const MDNode *> &SeenIDs,
519 SmallVectorImpl<const MDNode *> &Requirements);
520 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
521 void visitFunction(const Function &F);
522 void visitBasicBlock(BasicBlock &BB);
523 void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
524 bool IsAbsoluteSymbol);
525 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
526 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
527 void visitProfMetadata(Instruction &I, MDNode *MD);
528 void visitCallStackMetadata(MDNode *MD);
529 void visitMemProfMetadata(Instruction &I, MDNode *MD);
530 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
531 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
532 void visitAnnotationMetadata(MDNode *Annotation);
533 void visitAliasScopeMetadata(const MDNode *MD);
534 void visitAliasScopeListMetadata(const MDNode *MD);
535 void visitAccessGroupMetadata(const MDNode *MD);
536
537 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
538#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
539#include "llvm/IR/Metadata.def"
540 void visitDIScope(const DIScope &N);
541 void visitDIVariable(const DIVariable &N);
542 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
543 void visitDITemplateParameter(const DITemplateParameter &N);
544
545 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
546
547 void visit(DbgLabelRecord &DLR);
548 void visit(DbgVariableRecord &DVR);
549 // InstVisitor overrides...
550 using InstVisitor<Verifier>::visit;
551 void visitDbgRecords(Instruction &I);
552 void visit(Instruction &I);
553
554 void visitTruncInst(TruncInst &I);
555 void visitZExtInst(ZExtInst &I);
556 void visitSExtInst(SExtInst &I);
557 void visitFPTruncInst(FPTruncInst &I);
558 void visitFPExtInst(FPExtInst &I);
559 void visitFPToUIInst(FPToUIInst &I);
560 void visitFPToSIInst(FPToSIInst &I);
561 void visitUIToFPInst(UIToFPInst &I);
562 void visitSIToFPInst(SIToFPInst &I);
563 void visitIntToPtrInst(IntToPtrInst &I);
564 void visitPtrToIntInst(PtrToIntInst &I);
565 void visitBitCastInst(BitCastInst &I);
566 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
567 void visitPHINode(PHINode &PN);
568 void visitCallBase(CallBase &Call);
569 void visitUnaryOperator(UnaryOperator &U);
570 void visitBinaryOperator(BinaryOperator &B);
571 void visitICmpInst(ICmpInst &IC);
572 void visitFCmpInst(FCmpInst &FC);
573 void visitExtractElementInst(ExtractElementInst &EI);
574 void visitInsertElementInst(InsertElementInst &EI);
575 void visitShuffleVectorInst(ShuffleVectorInst &EI);
576 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
577 void visitCallInst(CallInst &CI);
578 void visitInvokeInst(InvokeInst &II);
579 void visitGetElementPtrInst(GetElementPtrInst &GEP);
580 void visitLoadInst(LoadInst &LI);
581 void visitStoreInst(StoreInst &SI);
582 void verifyDominatesUse(Instruction &I, unsigned i);
583 void visitInstruction(Instruction &I);
584 void visitTerminator(Instruction &I);
585 void visitBranchInst(BranchInst &BI);
586 void visitReturnInst(ReturnInst &RI);
587 void visitSwitchInst(SwitchInst &SI);
588 void visitIndirectBrInst(IndirectBrInst &BI);
589 void visitCallBrInst(CallBrInst &CBI);
590 void visitSelectInst(SelectInst &SI);
591 void visitUserOp1(Instruction &I);
592 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
593 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
594 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
595 void visitVPIntrinsic(VPIntrinsic &VPI);
596 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
597 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
598 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
599 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
600 void visitFenceInst(FenceInst &FI);
601 void visitAllocaInst(AllocaInst &AI);
602 void visitExtractValueInst(ExtractValueInst &EVI);
603 void visitInsertValueInst(InsertValueInst &IVI);
604 void visitEHPadPredecessors(Instruction &I);
605 void visitLandingPadInst(LandingPadInst &LPI);
606 void visitResumeInst(ResumeInst &RI);
607 void visitCatchPadInst(CatchPadInst &CPI);
608 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
609 void visitCleanupPadInst(CleanupPadInst &CPI);
610 void visitFuncletPadInst(FuncletPadInst &FPI);
611 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
612 void visitCleanupReturnInst(CleanupReturnInst &CRI);
613
614 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
615 void verifySwiftErrorValue(const Value *SwiftErrorVal);
616 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
617 void verifyMustTailCall(CallInst &CI);
618 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
619 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
620 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
621 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
622 const Value *V);
623 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
624 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
625 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
626
627 void visitConstantExprsRecursively(const Constant *EntryC);
628 void visitConstantExpr(const ConstantExpr *CE);
629 void verifyInlineAsmCall(const CallBase &Call);
630 void verifyStatepoint(const CallBase &Call);
631 void verifyFrameRecoverIndices();
632 void verifySiblingFuncletUnwinds();
633
634 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
635 void verifyFragmentExpression(const DbgVariableRecord &I);
636 template <typename ValueOrMetadata>
637 void verifyFragmentExpression(const DIVariable &V,
638 DIExpression::FragmentInfo Fragment,
639 ValueOrMetadata *Desc);
640 void verifyFnArgs(const DbgVariableIntrinsic &I);
641 void verifyFnArgs(const DbgVariableRecord &DVR);
642 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
643 void verifyNotEntryValue(const DbgVariableRecord &I);
644
645 /// Module-level debug info verification...
646 void verifyCompileUnits();
647
648 /// Module-level verification that all @llvm.experimental.deoptimize
649 /// declarations share the same calling convention.
650 void verifyDeoptimizeCallingConvs();
651
652 void verifyAttachedCallBundle(const CallBase &Call,
653 const OperandBundleUse &BU);
654
655 /// Verify the llvm.experimental.noalias.scope.decl declarations
656 void verifyNoAliasScopeDecl();
657};
658
659} // end anonymous namespace
660
661/// We know that cond should be true, if not print an error message.
662#define Check(C, ...) \
663 do { \
664 if (!(C)) { \
665 CheckFailed(__VA_ARGS__); \
666 return; \
667 } \
668 } while (false)
669
670/// We know that a debug info condition should be true, if not print
671/// an error message.
672#define CheckDI(C, ...) \
673 do { \
674 if (!(C)) { \
675 DebugInfoCheckFailed(__VA_ARGS__); \
676 return; \
677 } \
678 } while (false)
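As a usage sketch (the visitor method and conditions below are hypothetical; real uses start immediately after): a failing Check calls CheckFailed and then returns from the enclosing visit method, so later checks that rely on the earlier invariant are never evaluated.

// Hypothetical Verifier visitor body illustrating the early-return behaviour:
void Verifier::visitExampleInst(Instruction &I) {
  Check(I.getNumOperands() == 2, "expected exactly two operands", &I);
  // Only reached when the operand-count check above passed.
  Check(I.getOperand(0)->getType() == I.getOperand(1)->getType(),
        "operand types must match", &I);
}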
679
680void Verifier::visitDbgRecords(Instruction &I) {
681 if (!I.DebugMarker)
682 return;
683 CheckDI(I.DebugMarker->MarkedInstr == &I,
684 "Instruction has invalid DebugMarker", &I);
685 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
686 "PHI Node must not have any attached DbgRecords", &I);
687 for (DbgRecord &DR : I.getDbgRecordRange()) {
688 CheckDI(DR.getMarker() == I.DebugMarker,
689 "DbgRecord had invalid DebugMarker", &I, &DR);
690 if (auto *Loc =
691 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
692 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
693 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
694 visit(*DVR);
695 // These have to appear after `visit` for consistency with existing
696 // intrinsic behaviour.
697 verifyFragmentExpression(*DVR);
698 verifyNotEntryValue(*DVR);
699 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
700 visit(*DLR);
701 }
702 }
703}
704
705void Verifier::visit(Instruction &I) {
706 visitDbgRecords(I);
707 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
708 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
709 visitInstruction(I);
710}
711
712// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
713 static void forEachUser(const Value *User,
714 SmallPtrSet<const Value *, 32> &Visited,
715 llvm::function_ref<bool(const Value *)> Callback) {
716 if (!Visited.insert(User).second)
717 return;
718
719 SmallVector<const Value *> WorkList;
720 append_range(WorkList, User->materialized_users());
721 while (!WorkList.empty()) {
722 const Value *Cur = WorkList.pop_back_val();
723 if (!Visited.insert(Cur).second)
724 continue;
725 if (Callback(Cur))
726 append_range(WorkList, Cur->materialized_users());
727 }
728}
729
730void Verifier::visitGlobalValue(const GlobalValue &GV) {
732 "Global is external, but doesn't have external or weak linkage!", &GV);
733
734 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
735
736 if (MaybeAlign A = GO->getAlign()) {
737 Check(A->value() <= Value::MaximumAlignment,
738 "huge alignment values are unsupported", GO);
739 }
740
741 if (const MDNode *Associated =
742 GO->getMetadata(LLVMContext::MD_associated)) {
743 Check(Associated->getNumOperands() == 1,
744 "associated metadata must have one operand", &GV, Associated);
745 const Metadata *Op = Associated->getOperand(0).get();
746 Check(Op, "associated metadata must have a global value", GO, Associated);
747
748 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
749 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
750 if (VM) {
751 Check(isa<PointerType>(VM->getValue()->getType()),
752 "associated value must be pointer typed", GV, Associated);
753
754 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
755 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
756 "associated metadata must point to a GlobalObject", GO, Stripped);
757 Check(Stripped != GO,
758 "global values should not associate to themselves", GO,
759 Associated);
760 }
761 }
762
763 // FIXME: Why is getMetadata on GlobalValue protected?
764 if (const MDNode *AbsoluteSymbol =
765 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
766 verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
767 true);
768 }
769 }
770
771 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
772 "Only global variables can have appending linkage!", &GV);
773
774 if (GV.hasAppendingLinkage()) {
775 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
776 Check(GVar && GVar->getValueType()->isArrayTy(),
777 "Only global arrays can have appending linkage!", GVar);
778 }
779
780 if (GV.isDeclarationForLinker())
781 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
782
783 if (GV.hasDLLExportStorageClass()) {
785 "dllexport GlobalValue must have default or protected visibility",
786 &GV);
787 }
788 if (GV.hasDLLImportStorageClass()) {
790 "dllimport GlobalValue must have default visibility", &GV);
791 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
792 &GV);
793
794 Check((GV.isDeclaration() &&
797 "Global is marked as dllimport, but not external", &GV);
798 }
799
800 if (GV.isImplicitDSOLocal())
801 Check(GV.isDSOLocal(),
802 "GlobalValue with local linkage or non-default "
803 "visibility must be dso_local!",
804 &GV);
805
806 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
807 if (const Instruction *I = dyn_cast<Instruction>(V)) {
808 if (!I->getParent() || !I->getParent()->getParent())
809 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
810 I);
811 else if (I->getParent()->getParent()->getParent() != &M)
812 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
813 I->getParent()->getParent(),
814 I->getParent()->getParent()->getParent());
815 return false;
816 } else if (const Function *F = dyn_cast<Function>(V)) {
817 if (F->getParent() != &M)
818 CheckFailed("Global is used by function in a different module", &GV, &M,
819 F, F->getParent());
820 return false;
821 }
822 return true;
823 });
824}
825
826void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
827 if (GV.hasInitializer()) {
829 "Global variable initializer type does not match global "
830 "variable type!",
831 &GV);
832 // If the global has common linkage, it must have a zero initializer and
833 // cannot be constant.
834 if (GV.hasCommonLinkage()) {
836 "'common' global must have a zero initializer!", &GV);
837 Check(!GV.isConstant(), "'common' global may not be marked constant!",
838 &GV);
839 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
840 }
841 }
842
843 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
844 GV.getName() == "llvm.global_dtors")) {
846 "invalid linkage for intrinsic global variable", &GV);
848 "invalid uses of intrinsic global variable", &GV);
849
850 // Don't worry about emitting an error for it not being an array,
851 // visitGlobalValue will complain on appending non-array.
852 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
853 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
854 PointerType *FuncPtrTy =
855 PointerType::get(Context, DL.getProgramAddressSpace());
856 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
857 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
858 STy->getTypeAtIndex(1) == FuncPtrTy,
859 "wrong type for intrinsic global variable", &GV);
860 Check(STy->getNumElements() == 3,
861 "the third field of the element type is mandatory, "
862 "specify ptr null to migrate from the obsoleted 2-field form");
863 Type *ETy = STy->getTypeAtIndex(2);
864 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
865 &GV);
866 }
867 }
868
869 if (GV.hasName() && (GV.getName() == "llvm.used" ||
870 GV.getName() == "llvm.compiler.used")) {
872 "invalid linkage for intrinsic global variable", &GV);
874 "invalid uses of intrinsic global variable", &GV);
875
876 Type *GVType = GV.getValueType();
877 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
878 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
879 Check(PTy, "wrong type for intrinsic global variable", &GV);
880 if (GV.hasInitializer()) {
881 const Constant *Init = GV.getInitializer();
882 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
883 Check(InitArray, "wrong initalizer for intrinsic global variable",
884 Init);
885 for (Value *Op : InitArray->operands()) {
886 Value *V = Op->stripPointerCasts();
887 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
888 isa<GlobalAlias>(V),
889 Twine("invalid ") + GV.getName() + " member", V);
890 Check(V->hasName(),
891 Twine("members of ") + GV.getName() + " must be named", V);
892 }
893 }
894 }
895 }
896
897 // Visit any debug info attachments.
898 SmallVector<MDNode *, 1> MDs;
899 GV.getMetadata(LLVMContext::MD_dbg, MDs);
900 for (auto *MD : MDs) {
901 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
902 visitDIGlobalVariableExpression(*GVE);
903 else
904 CheckDI(false, "!dbg attachment of global variable must be a "
905 "DIGlobalVariableExpression");
906 }
907
908 // Scalable vectors cannot be global variables, since we don't know
909 // the runtime size.
911 "Globals cannot contain scalable types", &GV);
912
913 // Check if it's a target extension type that disallows being used as a
914 // global.
915 if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
916 Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
917 "Global @" + GV.getName() + " has illegal target extension type",
918 TTy);
919
920 if (!GV.hasInitializer()) {
921 visitGlobalValue(GV);
922 return;
923 }
924
925 // Walk any aggregate initializers looking for bitcasts between address spaces
926 visitConstantExprsRecursively(GV.getInitializer());
927
928 visitGlobalValue(GV);
929}
930
931void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
932 SmallPtrSet<const GlobalAlias *, 4> Visited;
933 Visited.insert(&GA);
934 visitAliaseeSubExpr(Visited, GA, C);
935}
936
937void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
938 const GlobalAlias &GA, const Constant &C) {
939 if (GA.hasAvailableExternallyLinkage()) {
940 Check(isa<GlobalValue>(C) &&
941 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
942 "available_externally alias must point to available_externally "
943 "global value",
944 &GA);
945 }
946 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
948 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
949 &GA);
950 }
951
952 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
953 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
954
955 Check(!GA2->isInterposable(),
956 "Alias cannot point to an interposable alias", &GA);
957 } else {
958 // Only continue verifying subexpressions of GlobalAliases.
959 // Do not recurse into global initializers.
960 return;
961 }
962 }
963
964 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
965 visitConstantExprsRecursively(CE);
966
967 for (const Use &U : C.operands()) {
968 Value *V = &*U;
969 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
970 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
971 else if (const auto *C2 = dyn_cast<Constant>(V))
972 visitAliaseeSubExpr(Visited, GA, *C2);
973 }
974}
975
976void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
978 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
979 "weak_odr, external, or available_externally linkage!",
980 &GA);
981 const Constant *Aliasee = GA.getAliasee();
982 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
983 Check(GA.getType() == Aliasee->getType(),
984 "Alias and aliasee types should match!", &GA);
985
986 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
987 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
988
989 visitAliaseeSubExpr(GA, *Aliasee);
990
991 visitGlobalValue(GA);
992}
993
994void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
996 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
997 "weak_odr, or external linkage!",
998 &GI);
999 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1000 // is a Function definition.
1002 Check(Resolver, "IFunc must have a Function resolver", &GI);
1003 Check(!Resolver->isDeclarationForLinker(),
1004 "IFunc resolver must be a definition", &GI);
1005
1006 // Check that the immediate resolver operand (prior to any bitcasts) has the
1007 // correct type.
1008 const Type *ResolverTy = GI.getResolver()->getType();
1009
1010 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1011 "IFunc resolver must return a pointer", &GI);
1012
1013 const Type *ResolverFuncTy =
1014 GlobalIFunc::getResolverFunctionType(GI.getValueType());
1015 Check(ResolverTy == ResolverFuncTy->getPointerTo(GI.getAddressSpace()),
1016 "IFunc resolver has incorrect type", &GI);
1017}
1018
1019void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1020 // There used to be various other llvm.dbg.* nodes, but we don't support
1021 // upgrading them and we want to reserve the namespace for future uses.
1022 if (NMD.getName().starts_with("llvm.dbg."))
1023 CheckDI(NMD.getName() == "llvm.dbg.cu",
1024 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1025 for (const MDNode *MD : NMD.operands()) {
1026 if (NMD.getName() == "llvm.dbg.cu")
1027 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1028
1029 if (!MD)
1030 continue;
1031
1032 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1033 }
1034}
1035
1036void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1037 // Only visit each node once. Metadata can be mutually recursive, so this
1038 // avoids infinite recursion here, as well as being an optimization.
1039 if (!MDNodes.insert(&MD).second)
1040 return;
1041
1042 Check(&MD.getContext() == &Context,
1043 "MDNode context does not match Module context!", &MD);
1044
1045 switch (MD.getMetadataID()) {
1046 default:
1047 llvm_unreachable("Invalid MDNode subclass");
1048 case Metadata::MDTupleKind:
1049 break;
1050#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1051 case Metadata::CLASS##Kind: \
1052 visit##CLASS(cast<CLASS>(MD)); \
1053 break;
1054#include "llvm/IR/Metadata.def"
1055 }
1056
1057 for (const Metadata *Op : MD.operands()) {
1058 if (!Op)
1059 continue;
1060 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1061 &MD, Op);
1062 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1063 "DILocation not allowed within this metadata node", &MD, Op);
1064 if (auto *N = dyn_cast<MDNode>(Op)) {
1065 visitMDNode(*N, AllowLocs);
1066 continue;
1067 }
1068 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1069 visitValueAsMetadata(*V, nullptr);
1070 continue;
1071 }
1072 }
1073
1074 // Check these last, so we diagnose problems in operands first.
1075 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1076 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1077}
1078
1079void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1080 Check(MD.getValue(), "Expected valid value", &MD);
1081 Check(!MD.getValue()->getType()->isMetadataTy(),
1082 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1083
1084 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1085 if (!L)
1086 return;
1087
1088 Check(F, "function-local metadata used outside a function", L);
1089
1090 // If this was an instruction, bb, or argument, verify that it is in the
1091 // function that we expect.
1092 Function *ActualF = nullptr;
1093 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1094 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1095 ActualF = I->getParent()->getParent();
1096 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1097 ActualF = BB->getParent();
1098 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1099 ActualF = A->getParent();
1100 assert(ActualF && "Unimplemented function local metadata case!");
1101
1102 Check(ActualF == F, "function-local metadata used in wrong function", L);
1103}
1104
1105void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1106 for (const ValueAsMetadata *VAM : AL.getArgs())
1107 visitValueAsMetadata(*VAM, F);
1108}
1109
1110void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1111 Metadata *MD = MDV.getMetadata();
1112 if (auto *N = dyn_cast<MDNode>(MD)) {
1113 visitMDNode(*N, AreDebugLocsAllowed::No);
1114 return;
1115 }
1116
1117 // Only visit each node once. Metadata can be mutually recursive, so this
1118 // avoids infinite recursion here, as well as being an optimization.
1119 if (!MDNodes.insert(MD).second)
1120 return;
1121
1122 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1123 visitValueAsMetadata(*V, F);
1124
1125 if (auto *AL = dyn_cast<DIArgList>(MD))
1126 visitDIArgList(*AL, F);
1127}
1128
1129static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1130static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1131static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1132
1133void Verifier::visitDILocation(const DILocation &N) {
1134 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1135 "location requires a valid scope", &N, N.getRawScope());
1136 if (auto *IA = N.getRawInlinedAt())
1137 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1138 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1139 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1140}
1141
1142void Verifier::visitGenericDINode(const GenericDINode &N) {
1143 CheckDI(N.getTag(), "invalid tag", &N);
1144}
1145
1146void Verifier::visitDIScope(const DIScope &N) {
1147 if (auto *F = N.getRawFile())
1148 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1149}
1150
1151void Verifier::visitDISubrange(const DISubrange &N) {
1152 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1153 bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
1154 CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
1155 N.getRawUpperBound(),
1156 "Subrange must contain count or upperBound", &N);
1157 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1158 "Subrange can have any one of count or upperBound", &N);
1159 auto *CBound = N.getRawCountNode();
1160 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1161 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1162 "Count must be signed constant or DIVariable or DIExpression", &N);
1163 auto Count = N.getCount();
1164 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1165 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1166 "invalid subrange count", &N);
1167 auto *LBound = N.getRawLowerBound();
1168 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1169 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1170 "LowerBound must be signed constant or DIVariable or DIExpression",
1171 &N);
1172 auto *UBound = N.getRawUpperBound();
1173 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1174 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1175 "UpperBound must be signed constant or DIVariable or DIExpression",
1176 &N);
1177 auto *Stride = N.getRawStride();
1178 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1179 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1180 "Stride must be signed constant or DIVariable or DIExpression", &N);
1181}
1182
1183void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1184 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1185 CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
1186 "GenericSubrange must contain count or upperBound", &N);
1187 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1188 "GenericSubrange can have any one of count or upperBound", &N);
1189 auto *CBound = N.getRawCountNode();
1190 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1191 "Count must be signed constant or DIVariable or DIExpression", &N);
1192 auto *LBound = N.getRawLowerBound();
1193 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1194 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1195 "LowerBound must be signed constant or DIVariable or DIExpression",
1196 &N);
1197 auto *UBound = N.getRawUpperBound();
1198 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1199 "UpperBound must be signed constant or DIVariable or DIExpression",
1200 &N);
1201 auto *Stride = N.getRawStride();
1202 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1203 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1204 "Stride must be signed constant or DIVariable or DIExpression", &N);
1205}
1206
1207void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1208 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1209}
1210
1211void Verifier::visitDIBasicType(const DIBasicType &N) {
1212 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1213 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1214 N.getTag() == dwarf::DW_TAG_string_type,
1215 "invalid tag", &N);
1216}
1217
1218void Verifier::visitDIStringType(const DIStringType &N) {
1219 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1220 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1221 &N);
1222}
1223
1224void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1225 // Common scope checks.
1226 visitDIScope(N);
1227
1228 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1229 N.getTag() == dwarf::DW_TAG_pointer_type ||
1230 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1231 N.getTag() == dwarf::DW_TAG_reference_type ||
1232 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1233 N.getTag() == dwarf::DW_TAG_const_type ||
1234 N.getTag() == dwarf::DW_TAG_immutable_type ||
1235 N.getTag() == dwarf::DW_TAG_volatile_type ||
1236 N.getTag() == dwarf::DW_TAG_restrict_type ||
1237 N.getTag() == dwarf::DW_TAG_atomic_type ||
1238 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1239 N.getTag() == dwarf::DW_TAG_member ||
1240 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1241 N.getTag() == dwarf::DW_TAG_inheritance ||
1242 N.getTag() == dwarf::DW_TAG_friend ||
1243 N.getTag() == dwarf::DW_TAG_set_type ||
1244 N.getTag() == dwarf::DW_TAG_template_alias,
1245 "invalid tag", &N);
1246 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1247 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1248 N.getRawExtraData());
1249 }
1250
1251 if (N.getTag() == dwarf::DW_TAG_set_type) {
1252 if (auto *T = N.getRawBaseType()) {
1253 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1254 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1255 CheckDI(
1256 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1257 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1258 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1259 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1260 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1261 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1262 "invalid set base type", &N, T);
1263 }
1264 }
1265
1266 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1267 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1268 N.getRawBaseType());
1269
1270 if (N.getDWARFAddressSpace()) {
1271 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1272 N.getTag() == dwarf::DW_TAG_reference_type ||
1273 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1274 "DWARF address space only applies to pointer or reference types",
1275 &N);
1276 }
1277}
1278
1279/// Detect mutually exclusive flags.
1280static bool hasConflictingReferenceFlags(unsigned Flags) {
1281 return ((Flags & DINode::FlagLValueReference) &&
1282 (Flags & DINode::FlagRValueReference)) ||
1283 ((Flags & DINode::FlagTypePassByValue) &&
1284 (Flags & DINode::FlagTypePassByReference));
1285}
1286
1287void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1288 auto *Params = dyn_cast<MDTuple>(&RawParams);
1289 CheckDI(Params, "invalid template params", &N, &RawParams);
1290 for (Metadata *Op : Params->operands()) {
1291 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1292 &N, Params, Op);
1293 }
1294}
1295
1296void Verifier::visitDICompositeType(const DICompositeType &N) {
1297 // Common scope checks.
1298 visitDIScope(N);
1299
1300 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1301 N.getTag() == dwarf::DW_TAG_structure_type ||
1302 N.getTag() == dwarf::DW_TAG_union_type ||
1303 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1304 N.getTag() == dwarf::DW_TAG_class_type ||
1305 N.getTag() == dwarf::DW_TAG_variant_part ||
1306 N.getTag() == dwarf::DW_TAG_namelist,
1307 "invalid tag", &N);
1308
1309 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1310 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1311 N.getRawBaseType());
1312
1313 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1314 "invalid composite elements", &N, N.getRawElements());
1315 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1316 N.getRawVTableHolder());
1318 "invalid reference flags", &N);
1319 unsigned DIBlockByRefStruct = 1 << 4;
1320 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1321 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1322
1323 if (N.isVector()) {
1324 const DINodeArray Elements = N.getElements();
1325 CheckDI(Elements.size() == 1 &&
1326 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1327 "invalid vector, expected one element of type subrange", &N);
1328 }
1329
1330 if (auto *Params = N.getRawTemplateParams())
1331 visitTemplateParams(N, *Params);
1332
1333 if (auto *D = N.getRawDiscriminator()) {
1334 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1335 "discriminator can only appear on variant part");
1336 }
1337
1338 if (N.getRawDataLocation()) {
1339 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1340 "dataLocation can only appear in array type");
1341 }
1342
1343 if (N.getRawAssociated()) {
1344 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1345 "associated can only appear in array type");
1346 }
1347
1348 if (N.getRawAllocated()) {
1349 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1350 "allocated can only appear in array type");
1351 }
1352
1353 if (N.getRawRank()) {
1354 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1355 "rank can only appear in array type");
1356 }
1357
1358 if (N.getTag() == dwarf::DW_TAG_array_type) {
1359 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1360 }
1361}
1362
1363void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1364 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1365 if (auto *Types = N.getRawTypeArray()) {
1366 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1367 for (Metadata *Ty : N.getTypeArray()->operands()) {
1368 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1369 }
1370 }
1372 "invalid reference flags", &N);
1373}
1374
1375void Verifier::visitDIFile(const DIFile &N) {
1376 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1377 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1378 if (Checksum) {
1379 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1380 "invalid checksum kind", &N);
1381 size_t Size;
1382 switch (Checksum->Kind) {
1383 case DIFile::CSK_MD5:
1384 Size = 32;
1385 break;
1386 case DIFile::CSK_SHA1:
1387 Size = 40;
1388 break;
1389 case DIFile::CSK_SHA256:
1390 Size = 64;
1391 break;
1392 }
1393 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1394 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1395 "invalid checksum", &N);
1396 }
1397}
1398
1399void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1400 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1401 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1402
1403 // Don't bother verifying the compilation directory or producer string
1404 // as those could be empty.
1405 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1406 N.getRawFile());
1407 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1408 N.getFile());
1409
1410 CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();
1411
1412 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1413 "invalid emission kind", &N);
1414
1415 if (auto *Array = N.getRawEnumTypes()) {
1416 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1417 for (Metadata *Op : N.getEnumTypes()->operands()) {
1418 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1419 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1420 "invalid enum type", &N, N.getEnumTypes(), Op);
1421 }
1422 }
1423 if (auto *Array = N.getRawRetainedTypes()) {
1424 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1425 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1426 CheckDI(
1427 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1428 !cast<DISubprogram>(Op)->isDefinition())),
1429 "invalid retained type", &N, Op);
1430 }
1431 }
1432 if (auto *Array = N.getRawGlobalVariables()) {
1433 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1434 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1435 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1436 "invalid global variable ref", &N, Op);
1437 }
1438 }
1439 if (auto *Array = N.getRawImportedEntities()) {
1440 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1441 for (Metadata *Op : N.getImportedEntities()->operands()) {
1442 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1443 &N, Op);
1444 }
1445 }
1446 if (auto *Array = N.getRawMacros()) {
1447 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1448 for (Metadata *Op : N.getMacros()->operands()) {
1449 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1450 }
1451 }
1452 CUVisited.insert(&N);
1453}
1454
1455void Verifier::visitDISubprogram(const DISubprogram &N) {
1456 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1457 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1458 if (auto *F = N.getRawFile())
1459 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1460 else
1461 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1462 if (auto *T = N.getRawType())
1463 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1464 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1465 N.getRawContainingType());
1466 if (auto *Params = N.getRawTemplateParams())
1467 visitTemplateParams(N, *Params);
1468 if (auto *S = N.getRawDeclaration())
1469 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1470 "invalid subprogram declaration", &N, S);
1471 if (auto *RawNode = N.getRawRetainedNodes()) {
1472 auto *Node = dyn_cast<MDTuple>(RawNode);
1473 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1474 for (Metadata *Op : Node->operands()) {
1475 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1476 isa<DIImportedEntity>(Op)),
1477 "invalid retained nodes, expected DILocalVariable, DILabel or "
1478 "DIImportedEntity",
1479 &N, Node, Op);
1480 }
1481 }
1483 "invalid reference flags", &N);
1484
1485 auto *Unit = N.getRawUnit();
1486 if (N.isDefinition()) {
1487 // Subprogram definitions (not part of the type hierarchy).
1488 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1489 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1490 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1491 // There's no good way to cross the CU boundary to insert a nested
1492 // DISubprogram definition in one CU into a type defined in another CU.
1493 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1494 if (CT && CT->getRawIdentifier() &&
1495 M.getContext().isODRUniquingDebugTypes())
1496 CheckDI(N.getDeclaration(),
1497 "definition subprograms cannot be nested within DICompositeType "
1498 "when enabling ODR",
1499 &N);
1500 } else {
1501 // Subprogram declarations (part of the type hierarchy).
1502 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1503 CheckDI(!N.getRawDeclaration(),
1504 "subprogram declaration must not have a declaration field");
1505 }
1506
1507 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1508 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1509 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1510 for (Metadata *Op : ThrownTypes->operands())
1511 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1512 Op);
1513 }
1514
1515 if (N.areAllCallsDescribed())
1516 CheckDI(N.isDefinition(),
1517 "DIFlagAllCallsDescribed must be attached to a definition");
1518}
1519
1520void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1521 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1522 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1523 "invalid local scope", &N, N.getRawScope());
1524 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1525 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1526}
1527
1528void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1529 visitDILexicalBlockBase(N);
1530
1531 CheckDI(N.getLine() || !N.getColumn(),
1532 "cannot have column info without line info", &N);
1533}
1534
1535void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1536 visitDILexicalBlockBase(N);
1537}
1538
1539void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1540 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1541 if (auto *S = N.getRawScope())
1542 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1543 if (auto *S = N.getRawDecl())
1544 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1545}
1546
1547void Verifier::visitDINamespace(const DINamespace &N) {
1548 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1549 if (auto *S = N.getRawScope())
1550 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1551}
1552
1553void Verifier::visitDIMacro(const DIMacro &N) {
1554 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1555 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1556 "invalid macinfo type", &N);
1557 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1558 if (!N.getValue().empty()) {
1559 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1560 }
1561}
1562
1563void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1564 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1565 "invalid macinfo type", &N);
1566 if (auto *F = N.getRawFile())
1567 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1568
1569 if (auto *Array = N.getRawElements()) {
1570 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1571 for (Metadata *Op : N.getElements()->operands()) {
1572 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1573 }
1574 }
1575}
1576
1577void Verifier::visitDIModule(const DIModule &N) {
1578 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1579 CheckDI(!N.getName().empty(), "anonymous module", &N);
1580}
1581
1582void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1583 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1584}
1585
1586void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1587 visitDITemplateParameter(N);
1588
1589 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1590 &N);
1591}
1592
1593void Verifier::visitDITemplateValueParameter(
1594 const DITemplateValueParameter &N) {
1595 visitDITemplateParameter(N);
1596
1597 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1598 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1599 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1600 "invalid tag", &N);
1601}
1602
1603void Verifier::visitDIVariable(const DIVariable &N) {
1604 if (auto *S = N.getRawScope())
1605 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1606 if (auto *F = N.getRawFile())
1607 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1608}
1609
1610void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1611 // Checks common to all variables.
1612 visitDIVariable(N);
1613
1614 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1615 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1616 // Check only if the global variable is not an extern
1617 if (N.isDefinition())
1618 CheckDI(N.getType(), "missing global variable type", &N);
1619 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1620 CheckDI(isa<DIDerivedType>(Member),
1621 "invalid static data member declaration", &N, Member);
1622 }
1623}
1624
1625void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1626 // Checks common to all variables.
1627 visitDIVariable(N);
1628
1629 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1630 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1631 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1632 "local variable requires a valid scope", &N, N.getRawScope());
1633 if (auto Ty = N.getType())
1634 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1635}
1636
1637void Verifier::visitDIAssignID(const DIAssignID &N) {
1638 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1639 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1640}
1641
1642void Verifier::visitDILabel(const DILabel &N) {
1643 if (auto *S = N.getRawScope())
1644 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1645 if (auto *F = N.getRawFile())
1646 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1647
1648 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1649 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1650 "label requires a valid scope", &N, N.getRawScope());
1651}
1652
1653void Verifier::visitDIExpression(const DIExpression &N) {
1654 CheckDI(N.isValid(), "invalid expression", &N);
1655}
1656
1657void Verifier::visitDIGlobalVariableExpression(
1658 const DIGlobalVariableExpression &GVE) {
1659 CheckDI(GVE.getVariable(), "missing variable");
1660 if (auto *Var = GVE.getVariable())
1661 visitDIGlobalVariable(*Var);
1662 if (auto *Expr = GVE.getExpression()) {
1663 visitDIExpression(*Expr);
1664 if (auto Fragment = Expr->getFragmentInfo())
1665 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1666 }
1667}
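// Illustrative sketch (metadata numbers are placeholders): a global variable
// expression that satisfies the checks above pairs a DIGlobalVariable with a
// (possibly empty) DIExpression, e.g. in textual IR:
//   !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
// where !1 is the !DIGlobalVariable being described.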
1668
1669void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1670 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1671 if (auto *T = N.getRawType())
1672 CheckDI(isType(T), "invalid type ref", &N, T);
1673 if (auto *F = N.getRawFile())
1674 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1675}
1676
1677void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1678 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1679 N.getTag() == dwarf::DW_TAG_imported_declaration,
1680 "invalid tag", &N);
1681 if (auto *S = N.getRawScope())
1682 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1683 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1684 N.getRawEntity());
1685}
1686
1687void Verifier::visitComdat(const Comdat &C) {
1688 // In COFF the Module is invalid if the GlobalValue has private linkage.
1689 // Entities with private linkage don't have entries in the symbol table.
1690 if (TT.isOSBinFormatCOFF())
1691 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1692 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1693 GV);
1694}
1695
1696void Verifier::visitModuleIdents() {
1697 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1698 if (!Idents)
1699 return;
1700
1701 // llvm.ident takes a list of metadata entries. Each entry has exactly one string.
1702 // Scan each llvm.ident entry and make sure that this requirement is met.
1703 for (const MDNode *N : Idents->operands()) {
1704 Check(N->getNumOperands() == 1,
1705 "incorrect number of operands in llvm.ident metadata", N);
1706 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1707 ("invalid value for llvm.ident metadata entry operand"
1708 "(the operand should be a string)"),
1709 N->getOperand(0));
1710 }
1711}
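// Illustrative sketch (the producer string is a placeholder): a well-formed
// llvm.ident attachment carries one string operand per entry, e.g. in textual IR:
//   !llvm.ident = !{!0}
//   !0 = !{!"example compiler version 0.0"}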
1712
1713void Verifier::visitModuleCommandLines() {
1714 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1715 if (!CommandLines)
1716 return;
1717
1718 // llvm.commandline takes a list of metadata entries. Each entry has exactly
1719 // one string. Scan each llvm.commandline entry and make sure that this
1720 // requirement is met.
1721 for (const MDNode *N : CommandLines->operands()) {
1722 Check(N->getNumOperands() == 1,
1723 "incorrect number of operands in llvm.commandline metadata", N);
1724 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1725 ("invalid value for llvm.commandline metadata entry operand"
1726 "(the operand should be a string)"),
1727 N->getOperand(0));
1728 }
1729}
1730
1731void Verifier::visitModuleFlags() {
1732 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1733 if (!Flags) return;
1734
1735 // Scan each flag, and track the flags and requirements.
1736 DenseMap<const MDString*, const MDNode*> SeenIDs;
1737 SmallVector<const MDNode*, 16> Requirements;
1738 uint64_t PAuthABIPlatform = -1;
1739 uint64_t PAuthABIVersion = -1;
1740 for (const MDNode *MDN : Flags->operands()) {
1741 visitModuleFlag(MDN, SeenIDs, Requirements);
1742 if (MDN->getNumOperands() != 3)
1743 continue;
1744 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1745 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1746 if (const auto *PAP =
1747 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1748 PAuthABIPlatform = PAP->getZExtValue();
1749 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1750 if (const auto *PAV =
1751 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1752 PAuthABIVersion = PAV->getZExtValue();
1753 }
1754 }
1755 }
1756
1757 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1758 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1759 "'aarch64-elf-pauthabi-version' module flags must be present");
1760
1761 // Validate that the requirements in the module are valid.
1762 for (const MDNode *Requirement : Requirements) {
1763 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1764 const Metadata *ReqValue = Requirement->getOperand(1);
1765
1766 const MDNode *Op = SeenIDs.lookup(Flag);
1767 if (!Op) {
1768 CheckFailed("invalid requirement on flag, flag is not present in module",
1769 Flag);
1770 continue;
1771 }
1772
1773 if (Op->getOperand(2) != ReqValue) {
1774 CheckFailed(("invalid requirement on flag, "
1775 "flag does not have the required value"),
1776 Flag);
1777 continue;
1778 }
1779 }
1780}
1781
1782void
1783Verifier::visitModuleFlag(const MDNode *Op,
1784 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1785 SmallVectorImpl<const MDNode *> &Requirements) {
1786 // Each module flag should have three arguments, the merge behavior (a
1787 // constant int), the flag ID (an MDString), and the value.
1788 Check(Op->getNumOperands() == 3,
1789 "incorrect number of operands in module flag", Op);
1790 Module::ModFlagBehavior MFB;
1791 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1792 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1793 "invalid behavior operand in module flag (expected constant integer)",
1794 Op->getOperand(0));
1795 Check(false,
1796 "invalid behavior operand in module flag (unexpected constant)",
1797 Op->getOperand(0));
1798 }
1799 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1800 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1801 Op->getOperand(1));
1802
1803 // Check the values for behaviors with additional requirements.
1804 switch (MFB) {
1805 case Module::Error:
1806 case Module::Warning:
1807 case Module::Override:
1808 // These behavior types accept any value.
1809 break;
1810
1811 case Module::Min: {
1812 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1813 Check(V && V->getValue().isNonNegative(),
1814 "invalid value for 'min' module flag (expected constant non-negative "
1815 "integer)",
1816 Op->getOperand(2));
1817 break;
1818 }
1819
1820 case Module::Max: {
1821 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1822 "invalid value for 'max' module flag (expected constant integer)",
1823 Op->getOperand(2));
1824 break;
1825 }
1826
1827 case Module::Require: {
1828 // The value should itself be an MDNode with two operands, a flag ID (an
1829 // MDString), and a value.
1830 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1831 Check(Value && Value->getNumOperands() == 2,
1832 "invalid value for 'require' module flag (expected metadata pair)",
1833 Op->getOperand(2));
1834 Check(isa<MDString>(Value->getOperand(0)),
1835 ("invalid value for 'require' module flag "
1836 "(first value operand should be a string)"),
1837 Value->getOperand(0));
1838
1839 // Append it to the list of requirements, to check once all module flags are
1840 // scanned.
1841 Requirements.push_back(Value);
1842 break;
1843 }
1844
1845 case Module::Append:
1846 case Module::AppendUnique: {
1847 // These behavior types require the operand be an MDNode.
1848 Check(isa<MDNode>(Op->getOperand(2)),
1849 "invalid value for 'append'-type module flag "
1850 "(expected a metadata node)",
1851 Op->getOperand(2));
1852 break;
1853 }
1854 }
1855
1856 // Unless this is a "requires" flag, check the ID is unique.
1857 if (MFB != Module::Require) {
1858 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1859 Check(Inserted,
1860 "module flag identifiers must be unique (or of 'require' type)", ID);
1861 }
1862
1863 if (ID->getString() == "wchar_size") {
1864 ConstantInt *Value
1865 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1866 Check(Value, "wchar_size metadata requires constant integer argument");
1867 }
1868
1869 if (ID->getString() == "Linker Options") {
1870 // If the llvm.linker.options named metadata exists, we assume that the
1871 // bitcode reader has upgraded the module flag. Otherwise the flag might
1872 // have been created by a client directly.
1873 Check(M.getNamedMetadata("llvm.linker.options"),
1874 "'Linker Options' named metadata no longer supported");
1875 }
1876
1877 if (ID->getString() == "SemanticInterposition") {
1878 ConstantInt *Value =
1879 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1880 Check(Value,
1881 "SemanticInterposition metadata requires constant integer argument");
1882 }
1883
1884 if (ID->getString() == "CG Profile") {
1885 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1886 visitModuleFlagCGProfileEntry(MDO);
1887 }
1888}
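// Illustrative sketch (flag names and values are just examples): each
// llvm.module.flags entry is a (behavior, ID, value) triple, e.g. in textual IR:
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}   ; behavior 1 = Error
//   !1 = !{i32 7, !"PIC Level", i32 2}    ; behavior 7 = Max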
1889
1890void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1891 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1892 if (!FuncMDO)
1893 return;
1894 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1895 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1896 "expected a Function or null", FuncMDO);
1897 };
1898 auto Node = dyn_cast_or_null<MDNode>(MDO);
1899 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1900 CheckFunction(Node->getOperand(0));
1901 CheckFunction(Node->getOperand(1));
1902 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1903 Check(Count && Count->getType()->isIntegerTy(),
1904 "expected an integer constant", Node->getOperand(2));
1905}
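// Illustrative sketch (function names are placeholders): each "CG Profile"
// entry checked above is a (caller, callee, count) triple, e.g.:
//   !0 = !{ptr @caller, ptr @callee, i64 2048}
// The first two operands may also be null.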
1906
1907void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1908 for (Attribute A : Attrs) {
1909
1910 if (A.isStringAttribute()) {
1911#define GET_ATTR_NAMES
1912#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1913#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1914 if (A.getKindAsString() == #DISPLAY_NAME) { \
1915 auto V = A.getValueAsString(); \
1916 if (!(V.empty() || V == "true" || V == "false")) \
1917 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1918 ""); \
1919 }
1920
1921#include "llvm/IR/Attributes.inc"
1922 continue;
1923 }
1924
1925 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1926 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1927 V);
1928 return;
1929 }
1930 }
1931}
1932
1933// verifyParameterAttrs - Check the given attributes for an argument or return
1934// value of the specified type. The value V is printed in error messages.
1935void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1936 const Value *V) {
1937 if (!Attrs.hasAttributes())
1938 return;
1939
1940 verifyAttributeTypes(Attrs, V);
1941
1942 for (Attribute Attr : Attrs)
1943 Check(Attr.isStringAttribute() ||
1944 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1945 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1946 V);
1947
1948 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1949 Check(Attrs.getNumAttributes() == 1,
1950 "Attribute 'immarg' is incompatible with other attributes", V);
1951 }
1952
1953 // Check for mutually incompatible attributes. Only inreg is compatible with
1954 // sret.
1955 unsigned AttrCount = 0;
1956 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1957 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1958 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1959 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1960 Attrs.hasAttribute(Attribute::InReg);
1961 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1962 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1963 Check(AttrCount <= 1,
1964 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1965 "'byref', and 'sret' are incompatible!",
1966 V);
1967
1968 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1969 Attrs.hasAttribute(Attribute::ReadOnly)),
1970 "Attributes "
1971 "'inalloca and readonly' are incompatible!",
1972 V);
1973
1974 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1975 Attrs.hasAttribute(Attribute::Returned)),
1976 "Attributes "
1977 "'sret and returned' are incompatible!",
1978 V);
1979
1980 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1981 Attrs.hasAttribute(Attribute::SExt)),
1982 "Attributes "
1983 "'zeroext and signext' are incompatible!",
1984 V);
1985
1986 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1987 Attrs.hasAttribute(Attribute::ReadOnly)),
1988 "Attributes "
1989 "'readnone and readonly' are incompatible!",
1990 V);
1991
1992 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1993 Attrs.hasAttribute(Attribute::WriteOnly)),
1994 "Attributes "
1995 "'readnone and writeonly' are incompatible!",
1996 V);
1997
1998 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1999 Attrs.hasAttribute(Attribute::WriteOnly)),
2000 "Attributes "
2001 "'readonly and writeonly' are incompatible!",
2002 V);
2003
2004 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2005 Attrs.hasAttribute(Attribute::AlwaysInline)),
2006 "Attributes "
2007 "'noinline and alwaysinline' are incompatible!",
2008 V);
2009
2010 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2011 Attrs.hasAttribute(Attribute::ReadNone)),
2012 "Attributes writable and readnone are incompatible!", V);
2013
2014 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2015 Attrs.hasAttribute(Attribute::ReadOnly)),
2016 "Attributes writable and readonly are incompatible!", V);
2017
2018 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
2019 for (Attribute Attr : Attrs) {
2020 if (!Attr.isStringAttribute() &&
2021 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2022 CheckFailed("Attribute '" + Attr.getAsString() +
2023 "' applied to incompatible type!", V);
2024 return;
2025 }
2026 }
2027
2028 if (isa<PointerType>(Ty)) {
2029 if (Attrs.hasAttribute(Attribute::ByVal)) {
2030 if (Attrs.hasAttribute(Attribute::Alignment)) {
2031 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2032 Align MaxAlign(ParamMaxAlignment);
2033 Check(AttrAlign <= MaxAlign,
2034 "Attribute 'align' exceed the max size 2^14", V);
2035 }
2036 SmallPtrSet<Type *, 4> Visited;
2037 Check(Attrs.getByValType()->isSized(&Visited),
2038 "Attribute 'byval' does not support unsized types!", V);
2039 }
2040 if (Attrs.hasAttribute(Attribute::ByRef)) {
2041 SmallPtrSet<Type *, 4> Visited;
2042 Check(Attrs.getByRefType()->isSized(&Visited),
2043 "Attribute 'byref' does not support unsized types!", V);
2044 }
2045 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2046 SmallPtrSet<Type *, 4> Visited;
2047 Check(Attrs.getInAllocaType()->isSized(&Visited),
2048 "Attribute 'inalloca' does not support unsized types!", V);
2049 }
2050 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2051 SmallPtrSet<Type *, 4> Visited;
2052 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2053 "Attribute 'preallocated' does not support unsized types!", V);
2054 }
2055 }
2056
2057 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2058 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2059 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2060 V);
2061 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2062 "Invalid value for 'nofpclass' test mask", V);
2063 }
2064 if (Attrs.hasAttribute(Attribute::Range)) {
2065 auto CR = Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2066 Check(Ty->isIntOrIntVectorTy(CR.getBitWidth()),
2067 "Range bit width must match type bit width!", V);
2068 }
2069}
2070
2071void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2072 const Value *V) {
2073 if (Attrs.hasFnAttr(Attr)) {
2074 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2075 unsigned N;
2076 if (S.getAsInteger(10, N))
2077 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2078 }
2079}
2080
2081// Check parameter attributes against a function type.
2082// The value V is printed in error messages.
2083void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2084 const Value *V, bool IsIntrinsic,
2085 bool IsInlineAsm) {
2086 if (Attrs.isEmpty())
2087 return;
2088
2089 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2090 Check(Attrs.hasParentContext(Context),
2091 "Attribute list does not match Module context!", &Attrs, V);
2092 for (const auto &AttrSet : Attrs) {
2093 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2094 "Attribute set does not match Module context!", &AttrSet, V);
2095 for (const auto &A : AttrSet) {
2096 Check(A.hasParentContext(Context),
2097 "Attribute does not match Module context!", &A, V);
2098 }
2099 }
2100 }
2101
2102 bool SawNest = false;
2103 bool SawReturned = false;
2104 bool SawSRet = false;
2105 bool SawSwiftSelf = false;
2106 bool SawSwiftAsync = false;
2107 bool SawSwiftError = false;
2108
2109 // Verify return value attributes.
2110 AttributeSet RetAttrs = Attrs.getRetAttrs();
2111 for (Attribute RetAttr : RetAttrs)
2112 Check(RetAttr.isStringAttribute() ||
2113 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2114 "Attribute '" + RetAttr.getAsString() +
2115 "' does not apply to function return values",
2116 V);
2117
2118 unsigned MaxParameterWidth = 0;
2119 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2120 if (Ty->isVectorTy()) {
2121 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2122 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2123 if (Size > MaxParameterWidth)
2124 MaxParameterWidth = Size;
2125 }
2126 }
2127 };
2128 GetMaxParameterWidth(FT->getReturnType());
2129 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2130
2131 // Verify parameter attributes.
2132 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2133 Type *Ty = FT->getParamType(i);
2134 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2135
2136 if (!IsIntrinsic) {
2137 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2138 "immarg attribute only applies to intrinsics", V);
2139 if (!IsInlineAsm)
2140 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2141 "Attribute 'elementtype' can only be applied to intrinsics"
2142 " and inline asm.",
2143 V);
2144 }
2145
2146 verifyParameterAttrs(ArgAttrs, Ty, V);
2147 GetMaxParameterWidth(Ty);
2148
2149 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2150 Check(!SawNest, "More than one parameter has attribute nest!", V);
2151 SawNest = true;
2152 }
2153
2154 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2155 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2156 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2157 "Incompatible argument and return types for 'returned' attribute",
2158 V);
2159 SawReturned = true;
2160 }
2161
2162 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2163 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2164 Check(i == 0 || i == 1,
2165 "Attribute 'sret' is not on first or second parameter!", V);
2166 SawSRet = true;
2167 }
2168
2169 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2170 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2171 SawSwiftSelf = true;
2172 }
2173
2174 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2175 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2176 SawSwiftAsync = true;
2177 }
2178
2179 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2180 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2181 SawSwiftError = true;
2182 }
2183
2184 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2185 Check(i == FT->getNumParams() - 1,
2186 "inalloca isn't on the last parameter!", V);
2187 }
2188 }
2189
2190 if (!Attrs.hasFnAttrs())
2191 return;
2192
2193 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2194 for (Attribute FnAttr : Attrs.getFnAttrs())
2195 Check(FnAttr.isStringAttribute() ||
2196 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2197 "Attribute '" + FnAttr.getAsString() +
2198 "' does not apply to functions!",
2199 V);
2200
2201 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2202 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2203 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2204
2205 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2206 Check(Attrs.hasFnAttr(Attribute::NoInline),
2207 "Attribute 'optnone' requires 'noinline'!", V);
2208
2209 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2210 "Attributes 'optsize and optnone' are incompatible!", V);
2211
2212 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2213 "Attributes 'minsize and optnone' are incompatible!", V);
2214
2215 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2216 "Attributes 'optdebug and optnone' are incompatible!", V);
2217 }
2218
2219 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2220 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2221 "Attributes 'optsize and optdebug' are incompatible!", V);
2222
2223 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2224 "Attributes 'minsize and optdebug' are incompatible!", V);
2225 }
2226
2227 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2228 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2229 "Attribute writable and memory without argmem: write are incompatible!",
2230 V);
2231
2232 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2233 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2234 "Attributes 'aarch64_pstate_sm_enabled and "
2235 "aarch64_pstate_sm_compatible' are incompatible!",
2236 V);
2237 }
2238
2239 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2240 Attrs.hasFnAttr("aarch64_inout_za") +
2241 Attrs.hasFnAttr("aarch64_out_za") +
2242 Attrs.hasFnAttr("aarch64_preserves_za")) <= 1,
2243 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2244 "'aarch64_inout_za' and 'aarch64_preserves_za' are mutually exclusive",
2245 V);
2246
2247 Check(
2248 (Attrs.hasFnAttr("aarch64_new_zt0") + Attrs.hasFnAttr("aarch64_in_zt0") +
2249 Attrs.hasFnAttr("aarch64_inout_zt0") +
2250 Attrs.hasFnAttr("aarch64_out_zt0") +
2251 Attrs.hasFnAttr("aarch64_preserves_zt0")) <= 1,
2252 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2253 "'aarch64_inout_zt0' and 'aarch64_preserves_zt0' are mutually exclusive",
2254 V);
2255
2256 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2257 const GlobalValue *GV = cast<GlobalValue>(V);
2259 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2260 }
2261
2262 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2263 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2264 if (ParamNo >= FT->getNumParams()) {
2265 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2266 return false;
2267 }
2268
2269 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2270 CheckFailed("'allocsize' " + Name +
2271 " argument must refer to an integer parameter",
2272 V);
2273 return false;
2274 }
2275
2276 return true;
2277 };
2278
2279 if (!CheckParam("element size", Args->first))
2280 return;
2281
2282 if (Args->second && !CheckParam("number of elements", *Args->second))
2283 return;
2284 }
2285
2286 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2287 AllocFnKind K = Attrs.getAllocKind();
2288 AllocFnKind Type =
2289 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2290 if (!is_contained(
2291 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2292 Type))
2293 CheckFailed(
2294 "'allockind()' requires exactly one of alloc, realloc, and free");
2295 if ((Type == AllocFnKind::Free) &&
2296 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2297 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2298 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2299 "or aligned modifiers.");
2300 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2301 if ((K & ZeroedUninit) == ZeroedUninit)
2302 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2303 }
2304
2305 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2306 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2307 if (VScaleMin == 0)
2308 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2309 else if (!isPowerOf2_32(VScaleMin))
2310 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2311 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2312 if (VScaleMax && VScaleMin > VScaleMax)
2313 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2314 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2315 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2316 }
2317
2318 if (Attrs.hasFnAttr("frame-pointer")) {
2319 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2320 if (FP != "all" && FP != "non-leaf" && FP != "none")
2321 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2322 }
2323
2324 // Check EVEX512 feature.
2325 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2326 TT.isX86()) {
2327 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2328 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2329 "512-bit vector arguments require 'evex512' for AVX512", V);
2330 }
2331
2332 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2333 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2334 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2335
2336 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2337 StringRef S = A.getValueAsString();
2338 if (S != "none" && S != "all" && S != "non-leaf")
2339 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2340 }
2341
2342 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2343 StringRef S = A.getValueAsString();
2344 if (S != "a_key" && S != "b_key")
2345 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2346 V);
2347 }
2348
2349 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2350 StringRef S = A.getValueAsString();
2351 if (S != "true" && S != "false")
2352 CheckFailed(
2353 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2354 }
2355
2356 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2357 StringRef S = A.getValueAsString();
2358 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2359 if (!Info)
2360 CheckFailed("invalid name for a VFABI variant: " + S, V);
2361 }
2362}
2363
2364void Verifier::verifyFunctionMetadata(
2365 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2366 for (const auto &Pair : MDs) {
2367 if (Pair.first == LLVMContext::MD_prof) {
2368 MDNode *MD = Pair.second;
2369 Check(MD->getNumOperands() >= 2,
2370 "!prof annotations should have no less than 2 operands", MD);
2371
2372 // Check first operand.
2373 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2374 MD);
2375 Check(isa<MDString>(MD->getOperand(0)),
2376 "expected string with name of the !prof annotation", MD);
2377 MDString *MDS = cast<MDString>(MD->getOperand(0));
2378 StringRef ProfName = MDS->getString();
2379 Check(ProfName.equals("function_entry_count") ||
2380 ProfName.equals("synthetic_function_entry_count"),
2381 "first operand should be 'function_entry_count'"
2382 " or 'synthetic_function_entry_count'",
2383 MD);
2384
2385 // Check second operand.
2386 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2387 MD);
2388 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2389 "expected integer argument to function_entry_count", MD);
2390 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2391 MDNode *MD = Pair.second;
2392 Check(MD->getNumOperands() == 1,
2393 "!kcfi_type must have exactly one operand", MD);
2394 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2395 MD);
2396 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2397 "expected a constant operand for !kcfi_type", MD);
2398 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2399 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2400 "expected a constant integer operand for !kcfi_type", MD);
2401 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2402 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2403 }
2404 }
2405}
2406
2407void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2408 if (!ConstantExprVisited.insert(EntryC).second)
2409 return;
2410
2411 SmallVector<const Constant *, 16> Stack;
2412 Stack.push_back(EntryC);
2413
2414 while (!Stack.empty()) {
2415 const Constant *C = Stack.pop_back_val();
2416
2417 // Check this constant expression.
2418 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2419 visitConstantExpr(CE);
2420
2421 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2422 // Global Values get visited separately, but we do need to make sure
2423 // that the global value is in the correct module
2424 Check(GV->getParent() == &M, "Referencing global in another module!",
2425 EntryC, &M, GV, GV->getParent());
2426 continue;
2427 }
2428
2429 // Visit all sub-expressions.
2430 for (const Use &U : C->operands()) {
2431 const auto *OpC = dyn_cast<Constant>(U);
2432 if (!OpC)
2433 continue;
2434 if (!ConstantExprVisited.insert(OpC).second)
2435 continue;
2436 Stack.push_back(OpC);
2437 }
2438 }
2439}
2440
2441void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2442 if (CE->getOpcode() == Instruction::BitCast)
2443 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2444 CE->getType()),
2445 "Invalid bitcast", CE);
2446}
2447
2448bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2449 // There shouldn't be more attribute sets than there are parameters plus the
2450 // function and return value.
2451 return Attrs.getNumAttrSets() <= Params + 2;
2452}
2453
2454void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2455 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2456 unsigned ArgNo = 0;
2457 unsigned LabelNo = 0;
2458 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2459 if (CI.Type == InlineAsm::isLabel) {
2460 ++LabelNo;
2461 continue;
2462 }
2463
2464 // Only deal with constraints that correspond to call arguments.
2465 if (!CI.hasArg())
2466 continue;
2467
2468 if (CI.isIndirect) {
2469 const Value *Arg = Call.getArgOperand(ArgNo);
2470 Check(Arg->getType()->isPointerTy(),
2471 "Operand for indirect constraint must have pointer type", &Call);
2472
2473 Check(Call.getParamElementType(ArgNo),
2474 "Operand for indirect constraint must have elementtype attribute",
2475 &Call);
2476 } else {
2477 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2478 "Elementtype attribute can only be applied for indirect "
2479 "constraints",
2480 &Call);
2481 }
2482
2483 ArgNo++;
2484 }
2485
2486 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2487 Check(LabelNo == CallBr->getNumIndirectDests(),
2488 "Number of label constraints does not match number of callbr dests",
2489 &Call);
2490 } else {
2491 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2492 &Call);
2493 }
2494}
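// Illustrative sketch (assembly string and value names are placeholders): an
// indirect ("=*m") inline-asm operand is passed as a pointer argument and must
// carry an elementtype attribute, e.g.:
//   call void asm "movl $1, $0", "=*m,r"(ptr elementtype(i32) %dst, i32 %src)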
2495
2496/// Verify that statepoint intrinsic is well formed.
2497void Verifier::verifyStatepoint(const CallBase &Call) {
2498 assert(Call.getCalledFunction() &&
2499 Call.getCalledFunction()->getIntrinsicID() ==
2500 Intrinsic::experimental_gc_statepoint);
2501
2502 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2503 !Call.onlyAccessesArgMemory(),
2504 "gc.statepoint must read and write all memory to preserve "
2505 "reordering restrictions required by safepoint semantics",
2506 Call);
2507
2508 const int64_t NumPatchBytes =
2509 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2510 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2511 Check(NumPatchBytes >= 0,
2512 "gc.statepoint number of patchable bytes must be "
2513 "positive",
2514 Call);
2515
2516 Type *TargetElemType = Call.getParamElementType(2);
2517 Check(TargetElemType,
2518 "gc.statepoint callee argument must have elementtype attribute", Call);
2519 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2520 Check(TargetFuncType,
2521 "gc.statepoint callee elementtype must be function type", Call);
2522
2523 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2524 Check(NumCallArgs >= 0,
2525 "gc.statepoint number of arguments to underlying call "
2526 "must be positive",
2527 Call);
2528 const int NumParams = (int)TargetFuncType->getNumParams();
2529 if (TargetFuncType->isVarArg()) {
2530 Check(NumCallArgs >= NumParams,
2531 "gc.statepoint mismatch in number of vararg call args", Call);
2532
2533 // TODO: Remove this limitation
2534 Check(TargetFuncType->getReturnType()->isVoidTy(),
2535 "gc.statepoint doesn't support wrapping non-void "
2536 "vararg functions yet",
2537 Call);
2538 } else
2539 Check(NumCallArgs == NumParams,
2540 "gc.statepoint mismatch in number of call args", Call);
2541
2542 const uint64_t Flags
2543 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2544 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2545 "unknown flag used in gc.statepoint flags argument", Call);
2546
2547 // Verify that the types of the call parameter arguments match
2548 // the type of the wrapped callee.
2549 AttributeList Attrs = Call.getAttributes();
2550 for (int i = 0; i < NumParams; i++) {
2551 Type *ParamType = TargetFuncType->getParamType(i);
2552 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2553 Check(ArgType == ParamType,
2554 "gc.statepoint call argument does not match wrapped "
2555 "function type",
2556 Call);
2557
2558 if (TargetFuncType->isVarArg()) {
2559 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2560 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2561 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2562 }
2563 }
2564
2565 const int EndCallArgsInx = 4 + NumCallArgs;
2566
2567 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2568 Check(isa<ConstantInt>(NumTransitionArgsV),
2569 "gc.statepoint number of transition arguments "
2570 "must be constant integer",
2571 Call);
2572 const int NumTransitionArgs =
2573 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2574 Check(NumTransitionArgs == 0,
2575 "gc.statepoint w/inline transition bundle is deprecated", Call);
2576 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2577
2578 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2579 Check(isa<ConstantInt>(NumDeoptArgsV),
2580 "gc.statepoint number of deoptimization arguments "
2581 "must be constant integer",
2582 Call);
2583 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2584 Check(NumDeoptArgs == 0,
2585 "gc.statepoint w/inline deopt operands is deprecated", Call);
2586
2587 const int ExpectedNumArgs = 7 + NumCallArgs;
2588 Check(ExpectedNumArgs == (int)Call.arg_size(),
2589 "gc.statepoint too many arguments", Call);
2590
2591 // Check that the only uses of this gc.statepoint are gc.result or
2592 // gc.relocate calls which are tied to this statepoint and thus part
2593 // of the same statepoint sequence
2594 for (const User *U : Call.users()) {
2595 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2596 Check(UserCall, "illegal use of statepoint token", Call, U);
2597 if (!UserCall)
2598 continue;
2599 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2600 "gc.result or gc.relocate are the only value uses "
2601 "of a gc.statepoint",
2602 Call, U);
2603 if (isa<GCResultInst>(UserCall)) {
2604 Check(UserCall->getArgOperand(0) == &Call,
2605 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2606 } else if (isa<GCRelocateInst>(Call)) {
2607 Check(UserCall->getArgOperand(0) == &Call,
2608 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2609 }
2610 }
2611
2612 // Note: It is legal for a single derived pointer to be listed multiple
2613 // times. It's non-optimal, but it is legal. It can also happen after
2614 // insertion if we strip a bitcast away.
2615 // Note: It is really tempting to check that each base is relocated and
2616 // that a derived pointer is never reused as a base pointer. This turns
2617 // out to be problematic since optimizations run after safepoint insertion
2618 // can recognize equality properties that the insertion logic doesn't know
2619 // about. See example statepoint.ll in the verifier subdirectory
2620}
2621
2622void Verifier::verifyFrameRecoverIndices() {
2623 for (auto &Counts : FrameEscapeInfo) {
2624 Function *F = Counts.first;
2625 unsigned EscapedObjectCount = Counts.second.first;
2626 unsigned MaxRecoveredIndex = Counts.second.second;
2627 Check(MaxRecoveredIndex <= EscapedObjectCount,
2628 "all indices passed to llvm.localrecover must be less than the "
2629 "number of arguments passed to llvm.localescape in the parent "
2630 "function",
2631 F);
2632 }
2633}
2634
2635static Instruction *getSuccPad(Instruction *Terminator) {
2636 BasicBlock *UnwindDest;
2637 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2638 UnwindDest = II->getUnwindDest();
2639 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2640 UnwindDest = CSI->getUnwindDest();
2641 else
2642 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2643 return UnwindDest->getFirstNonPHI();
2644}
2645
2646void Verifier::verifySiblingFuncletUnwinds() {
2647 SmallPtrSet<Instruction *, 8> Visited;
2648 SmallPtrSet<Instruction *, 8> Active;
2649 for (const auto &Pair : SiblingFuncletInfo) {
2650 Instruction *PredPad = Pair.first;
2651 if (Visited.count(PredPad))
2652 continue;
2653 Active.insert(PredPad);
2654 Instruction *Terminator = Pair.second;
2655 do {
2656 Instruction *SuccPad = getSuccPad(Terminator);
2657 if (Active.count(SuccPad)) {
2658 // Found a cycle; report error
2659 Instruction *CyclePad = SuccPad;
2660 SmallVector<Instruction *, 8> CycleNodes;
2661 do {
2662 CycleNodes.push_back(CyclePad);
2663 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2664 if (CycleTerminator != CyclePad)
2665 CycleNodes.push_back(CycleTerminator);
2666 CyclePad = getSuccPad(CycleTerminator);
2667 } while (CyclePad != SuccPad);
2668 Check(false, "EH pads can't handle each other's exceptions",
2669 ArrayRef<Instruction *>(CycleNodes));
2670 }
2671 // Don't re-walk a node we've already checked
2672 if (!Visited.insert(SuccPad).second)
2673 break;
2674 // Walk to this successor if it has a map entry.
2675 PredPad = SuccPad;
2676 auto TermI = SiblingFuncletInfo.find(PredPad);
2677 if (TermI == SiblingFuncletInfo.end())
2678 break;
2679 Terminator = TermI->second;
2680 Active.insert(PredPad);
2681 } while (true);
2682 // Each node only has one successor, so we've walked all the active
2683 // nodes' successors.
2684 Active.clear();
2685 }
2686}
2687
2688// visitFunction - Verify that a function is ok.
2689//
2690void Verifier::visitFunction(const Function &F) {
2691 visitGlobalValue(F);
2692
2693 // Check function arguments.
2694 FunctionType *FT = F.getFunctionType();
2695 unsigned NumArgs = F.arg_size();
2696
2697 Check(&Context == &F.getContext(),
2698 "Function context does not match Module context!", &F);
2699
2700 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2701 Check(FT->getNumParams() == NumArgs,
2702 "# formal arguments must match # of arguments for function type!", &F,
2703 FT);
2704 Check(F.getReturnType()->isFirstClassType() ||
2705 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2706 "Functions cannot return aggregate values!", &F);
2707
2708 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2709 "Invalid struct return type!", &F);
2710
2711 AttributeList Attrs = F.getAttributes();
2712
2713 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2714 "Attribute after last parameter!", &F);
2715
2716 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2717 "Function debug format should match parent module", &F,
2718 F.IsNewDbgInfoFormat, F.getParent(),
2719 F.getParent()->IsNewDbgInfoFormat);
2720
2721 bool IsIntrinsic = F.isIntrinsic();
2722
2723 // Check function attributes.
2724 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2725
2726 // On function declarations/definitions, we do not support the builtin
2727 // attribute. We do not check this in VerifyFunctionAttrs since that is
2728 // checking for Attributes that can/can not ever be on functions.
2729 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2730 "Attribute 'builtin' can only be applied to a callsite.", &F);
2731
2732 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2733 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2734
2735 // Check that this function meets the restrictions on this calling convention.
2736 // Sometimes varargs is used for perfect-forwarding thunks, so some of these
2737 // restrictions can be lifted.
2738 switch (F.getCallingConv()) {
2739 default:
2740 case CallingConv::C:
2741 break;
2742 case CallingConv::X86_INTR: {
2743 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2744 "Calling convention parameter requires byval", &F);
2745 break;
2746 }
2747 case CallingConv::AMDGPU_KERNEL:
2748 case CallingConv::SPIR_KERNEL:
2749 case CallingConv::AMDGPU_CS_Chain:
2750 case CallingConv::AMDGPU_CS_ChainPreserve:
2751 Check(F.getReturnType()->isVoidTy(),
2752 "Calling convention requires void return type", &F);
2753 [[fallthrough]];
2754 case CallingConv::AMDGPU_VS:
2755 case CallingConv::AMDGPU_HS:
2756 case CallingConv::AMDGPU_GS:
2757 case CallingConv::AMDGPU_PS:
2758 case CallingConv::AMDGPU_CS:
2759 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2760 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2761 const unsigned StackAS = DL.getAllocaAddrSpace();
2762 unsigned i = 0;
2763 for (const Argument &Arg : F.args()) {
2764 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2765 "Calling convention disallows byval", &F);
2766 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2767 "Calling convention disallows preallocated", &F);
2768 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2769 "Calling convention disallows inalloca", &F);
2770
2771 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2772 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2773 // value here.
2774 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2775 "Calling convention disallows stack byref", &F);
2776 }
2777
2778 ++i;
2779 }
2780 }
2781
2782 [[fallthrough]];
2783 case CallingConv::Fast:
2784 case CallingConv::Cold:
2785 case CallingConv::Intel_OCL_BI:
2786 case CallingConv::PTX_Kernel:
2787 case CallingConv::PTX_Device:
2788 Check(!F.isVarArg(),
2789 "Calling convention does not support varargs or "
2790 "perfect forwarding!",
2791 &F);
2792 break;
2793 }
2794
2795 // Check that the argument values match the function type for this function...
2796 unsigned i = 0;
2797 for (const Argument &Arg : F.args()) {
2798 Check(Arg.getType() == FT->getParamType(i),
2799 "Argument value does not match function argument type!", &Arg,
2800 FT->getParamType(i));
2801 Check(Arg.getType()->isFirstClassType(),
2802 "Function arguments must have first-class types!", &Arg);
2803 if (!IsIntrinsic) {
2804 Check(!Arg.getType()->isMetadataTy(),
2805 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2806 Check(!Arg.getType()->isTokenTy(),
2807 "Function takes token but isn't an intrinsic", &Arg, &F);
2808 Check(!Arg.getType()->isX86_AMXTy(),
2809 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2810 }
2811
2812 // Check that swifterror argument is only used by loads and stores.
2813 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2814 verifySwiftErrorValue(&Arg);
2815 }
2816 ++i;
2817 }
2818
2819 if (!IsIntrinsic) {
2820 Check(!F.getReturnType()->isTokenTy(),
2821 "Function returns a token but isn't an intrinsic", &F);
2822 Check(!F.getReturnType()->isX86_AMXTy(),
2823 "Function returns a x86_amx but isn't an intrinsic", &F);
2824 }
2825
2826 // Get the function metadata attachments.
2827 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2828 F.getAllMetadata(MDs);
2829 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2830 verifyFunctionMetadata(MDs);
2831
2832 // Check validity of the personality function
2833 if (F.hasPersonalityFn()) {
2834 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2835 if (Per)
2836 Check(Per->getParent() == F.getParent(),
2837 "Referencing personality function in another module!", &F,
2838 F.getParent(), Per, Per->getParent());
2839 }
2840
2841 // EH funclet coloring can be expensive, recompute on-demand
2842 BlockEHFuncletColors.clear();
2843
2844 if (F.isMaterializable()) {
2845 // Function has a body somewhere we can't see.
2846 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2847 MDs.empty() ? nullptr : MDs.front().second);
2848 } else if (F.isDeclaration()) {
2849 for (const auto &I : MDs) {
2850 // This is used for call site debug information.
2851 CheckDI(I.first != LLVMContext::MD_dbg ||
2852 !cast<DISubprogram>(I.second)->isDistinct(),
2853 "function declaration may only have a unique !dbg attachment",
2854 &F);
2855 Check(I.first != LLVMContext::MD_prof,
2856 "function declaration may not have a !prof attachment", &F);
2857
2858 // Verify the metadata itself.
2859 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2860 }
2861 Check(!F.hasPersonalityFn(),
2862 "Function declaration shouldn't have a personality routine", &F);
2863 } else {
2864 // Verify that this function (which has a body) is not named "llvm.*". It
2865 // is not legal to define intrinsics.
2866 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2867
2868 // Check the entry node
2869 const BasicBlock *Entry = &F.getEntryBlock();
2870 Check(pred_empty(Entry),
2871 "Entry block to function must not have predecessors!", Entry);
2872
2873 // The address of the entry block cannot be taken, unless it is dead.
2874 if (Entry->hasAddressTaken()) {
2875 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2876 "blockaddress may not be used with the entry block!", Entry);
2877 }
2878
2879 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2880 NumKCFIAttachments = 0;
2881 // Visit metadata attachments.
2882 for (const auto &I : MDs) {
2883 // Verify that the attachment is legal.
2884 auto AllowLocs = AreDebugLocsAllowed::No;
2885 switch (I.first) {
2886 default:
2887 break;
2888 case LLVMContext::MD_dbg: {
2889 ++NumDebugAttachments;
2890 CheckDI(NumDebugAttachments == 1,
2891 "function must have a single !dbg attachment", &F, I.second);
2892 CheckDI(isa<DISubprogram>(I.second),
2893 "function !dbg attachment must be a subprogram", &F, I.second);
2894 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2895 "function definition may only have a distinct !dbg attachment",
2896 &F);
2897
2898 auto *SP = cast<DISubprogram>(I.second);
2899 const Function *&AttachedTo = DISubprogramAttachments[SP];
2900 CheckDI(!AttachedTo || AttachedTo == &F,
2901 "DISubprogram attached to more than one function", SP, &F);
2902 AttachedTo = &F;
2903 AllowLocs = AreDebugLocsAllowed::Yes;
2904 break;
2905 }
2906 case LLVMContext::MD_prof:
2907 ++NumProfAttachments;
2908 Check(NumProfAttachments == 1,
2909 "function must have a single !prof attachment", &F, I.second);
2910 break;
2911 case LLVMContext::MD_kcfi_type:
2912 ++NumKCFIAttachments;
2913 Check(NumKCFIAttachments == 1,
2914 "function must have a single !kcfi_type attachment", &F,
2915 I.second);
2916 break;
2917 }
2918
2919 // Verify the metadata itself.
2920 visitMDNode(*I.second, AllowLocs);
2921 }
2922 }
2923
2924 // If this function is actually an intrinsic, verify that it is only used in
2925 // direct call/invokes, never having its "address taken".
2926 // Only do this if the module is materialized, otherwise we don't have all the
2927 // uses.
2928 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2929 const User *U;
2930 if (F.hasAddressTaken(&U, false, true, false,
2931 /*IgnoreARCAttachedCall=*/true))
2932 Check(false, "Invalid user of intrinsic instruction!", U);
2933 }
2934
2935 // Check intrinsics' signatures.
2936 switch (F.getIntrinsicID()) {
2937 case Intrinsic::experimental_gc_get_pointer_base: {
2938 FunctionType *FT = F.getFunctionType();
2939 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2940 Check(isa<PointerType>(F.getReturnType()),
2941 "gc.get.pointer.base must return a pointer", F);
2942 Check(FT->getParamType(0) == F.getReturnType(),
2943 "gc.get.pointer.base operand and result must be of the same type", F);
2944 break;
2945 }
2946 case Intrinsic::experimental_gc_get_pointer_offset: {
2947 FunctionType *FT = F.getFunctionType();
2948 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2949 Check(isa<PointerType>(FT->getParamType(0)),
2950 "gc.get.pointer.offset operand must be a pointer", F);
2951 Check(F.getReturnType()->isIntegerTy(),
2952 "gc.get.pointer.offset must return integer", F);
2953 break;
2954 }
2955 }
2956
2957 auto *N = F.getSubprogram();
2958 HasDebugInfo = (N != nullptr);
2959 if (!HasDebugInfo)
2960 return;
2961
2962 // Check that all !dbg attachments lead back to N.
2963 //
2964 // FIXME: Check this incrementally while visiting !dbg attachments.
2965 // FIXME: Only check when N is the canonical subprogram for F.
2966 SmallPtrSet<const MDNode *, 32> Seen;
2967 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2968 // Be careful about using DILocation here since we might be dealing with
2969 // broken code (this is the Verifier after all).
2970 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2971 if (!DL)
2972 return;
2973 if (!Seen.insert(DL).second)
2974 return;
2975
2976 Metadata *Parent = DL->getRawScope();
2977 CheckDI(Parent && isa<DILocalScope>(Parent),
2978 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
2979
2980 DILocalScope *Scope = DL->getInlinedAtScope();
2981 Check(Scope, "Failed to find DILocalScope", DL);
2982
2983 if (!Seen.insert(Scope).second)
2984 return;
2985
2986 DISubprogram *SP = Scope->getSubprogram();
2987
2988 // Scope and SP could be the same MDNode and we don't want to skip
2989 // validation in that case
2990 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2991 return;
2992
2993 CheckDI(SP->describes(&F),
2994 "!dbg attachment points at wrong subprogram for function", N, &F,
2995 &I, DL, Scope, SP);
2996 };
2997 for (auto &BB : F)
2998 for (auto &I : BB) {
2999 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3000 // The llvm.loop annotations also contain two DILocations.
3001 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3002 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3003 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3004 if (BrokenDebugInfo)
3005 return;
3006 }
3007}
3008
3009// visitBasicBlock - Verify that a basic block is well formed...
3010//
3011void Verifier::visitBasicBlock(BasicBlock &BB) {
3012 InstsInThisBlock.clear();
3013 ConvergenceVerifyHelper.visit(BB);
3014
3015 // Ensure that basic blocks have terminators!
3016 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3017
3018 // Check constraints that this basic block imposes on all of the PHI nodes in
3019 // it.
3020 if (isa<PHINode>(BB.front())) {
3021 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3022 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3023 llvm::sort(Preds);
3024 for (const PHINode &PN : BB.phis()) {
3025 Check(PN.getNumIncomingValues() == Preds.size(),
3026 "PHINode should have one entry for each predecessor of its "
3027 "parent basic block!",
3028 &PN);
3029
3030 // Get and sort all incoming values in the PHI node...
3031 Values.clear();
3032 Values.reserve(PN.getNumIncomingValues());
3033 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3034 Values.push_back(
3035 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3036 llvm::sort(Values);
3037
3038 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3039 // Check to make sure that if there is more than one entry for a
3040 // particular basic block in this PHI node, that the incoming values are
3041 // all identical.
3042 //
3043 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3044 Values[i].second == Values[i - 1].second,
3045 "PHI node has multiple entries for the same basic block with "
3046 "different incoming values!",
3047 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3048
3049 // Check to make sure that the predecessors and PHI node entries are
3050 // matched up.
3051 Check(Values[i].first == Preds[i],
3052 "PHI node entries do not match predecessors!", &PN,
3053 Values[i].first, Preds[i]);
3054 }
3055 }
3056 }
3057
3058 // Check that all instructions have their parent pointers set up correctly.
3059 for (auto &I : BB)
3060 {
3061 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3062 }
3063
3064 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3065 "BB debug format should match parent function", &BB,
3066 BB.IsNewDbgInfoFormat, BB.getParent(),
3067 BB.getParent()->IsNewDbgInfoFormat);
3068
3069 // Confirm that no issues arise from the debug program.
3070 if (BB.IsNewDbgInfoFormat)
3071 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3072 &BB);
3073}
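// Illustrative sketch (block and value names are placeholders): a PHI node
// that satisfies the checks above has exactly one entry per predecessor, e.g.
// for a block whose predecessors are %entry and %loop:
//   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]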
3074
3075void Verifier::visitTerminator(Instruction &I) {
3076 // Ensure that terminators only exist at the end of the basic block.
3077 Check(&I == I.getParent()->getTerminator(),
3078 "Terminator found in the middle of a basic block!", I.getParent());
3079 visitInstruction(I);
3080}
3081
3082void Verifier::visitBranchInst(BranchInst &BI) {
3083 if (BI.isConditional()) {
3085 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3086 }
3087 visitTerminator(BI);
3088}
3089
3090void Verifier::visitReturnInst(ReturnInst &RI) {
3091 Function *F = RI.getParent()->getParent();
3092 unsigned N = RI.getNumOperands();
3093 if (F->getReturnType()->isVoidTy())
3094 Check(N == 0,
3095 "Found return instr that returns non-void in Function of void "
3096 "return type!",
3097 &RI, F->getReturnType());
3098 else
3099 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3100 "Function return type does not match operand "
3101 "type of return inst!",
3102 &RI, F->getReturnType());
3103
3104 // Check to make sure that the return value has necessary properties for
3105 // terminators...
3106 visitTerminator(RI);
3107}
3108
3109void Verifier::visitSwitchInst(SwitchInst &SI) {
3110 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3111 // Check to make sure that all of the constants in the switch instruction
3112 // have the same type as the switched-on value.
3113 Type *SwitchTy = SI.getCondition()->getType();
3114  SmallPtrSet<ConstantInt *, 32> Constants;
3115  for (auto &Case : SI.cases()) {
3116 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3117 "Case value is not a constant integer.", &SI);
3118 Check(Case.getCaseValue()->getType() == SwitchTy,
3119 "Switch constants must all be same type as switch value!", &SI);
3120 Check(Constants.insert(Case.getCaseValue()).second,
3121 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3122 }
3123
3124 visitTerminator(SI);
3125}
3126
3127void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3128  Check(BI.getAddress()->getType()->isPointerTy(),
3129        "Indirectbr operand must have pointer type!", &BI);
3130 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3131    Check(BI.getDestination(i)->getType()->isLabelTy(),
3132          "Indirectbr destinations must all have pointer type!", &BI);
3133
3134 visitTerminator(BI);
3135}
3136
3137void Verifier::visitCallBrInst(CallBrInst &CBI) {
3138 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3139 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3140 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3141
3142 verifyInlineAsmCall(CBI);
3143 visitTerminator(CBI);
3144}
3145
3146void Verifier::visitSelectInst(SelectInst &SI) {
3147 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3148 SI.getOperand(2)),
3149 "Invalid operands for select instruction!", &SI);
3150
3151 Check(SI.getTrueValue()->getType() == SI.getType(),
3152 "Select values must have same type as select instruction!", &SI);
3153 visitInstruction(SI);
3154}
3155
3156/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3157/// a pass; if any exist, it's an error.
3158///
3159void Verifier::visitUserOp1(Instruction &I) {
3160 Check(false, "User-defined operators should not live outside of a pass!", &I);
3161}
3162
3163void Verifier::visitTruncInst(TruncInst &I) {
3164 // Get the source and destination types
3165 Type *SrcTy = I.getOperand(0)->getType();
3166 Type *DestTy = I.getType();
3167
3168  // Get the size of the types in bits; we'll need this later
3169 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3170 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3171
3172 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3173 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3174 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3175 "trunc source and destination must both be a vector or neither", &I);
3176 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3177
3178  visitInstruction(I);
3179}
3180
3181void Verifier::visitZExtInst(ZExtInst &I) {
3182 // Get the source and destination types
3183 Type *SrcTy = I.getOperand(0)->getType();
3184 Type *DestTy = I.getType();
3185
3186  // Get the size of the types in bits; we'll need this later
3187 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3188 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3189 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3190 "zext source and destination must both be a vector or neither", &I);
3191 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3192 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3193
3194 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3195
3196  visitInstruction(I);
3197}
3198
3199void Verifier::visitSExtInst(SExtInst &I) {
3200 // Get the source and destination types
3201 Type *SrcTy = I.getOperand(0)->getType();
3202 Type *DestTy = I.getType();
3203
3204  // Get the size of the types in bits; we'll need this later
3205 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3206 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3207
3208 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3209 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3210 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3211 "sext source and destination must both be a vector or neither", &I);
3212 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3213
3214  visitInstruction(I);
3215}
3216
3217void Verifier::visitFPTruncInst(FPTruncInst &I) {
3218 // Get the source and destination types
3219 Type *SrcTy = I.getOperand(0)->getType();
3220 Type *DestTy = I.getType();
3221  // Get the size of the types in bits; we'll need this later
3222 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3223 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3224
3225 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3226 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3227 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3228 "fptrunc source and destination must both be a vector or neither", &I);
3229 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3230
3231  visitInstruction(I);
3232}
3233
3234void Verifier::visitFPExtInst(FPExtInst &I) {
3235 // Get the source and destination types
3236 Type *SrcTy = I.getOperand(0)->getType();
3237 Type *DestTy = I.getType();
3238
3239  // Get the size of the types in bits; we'll need this later
3240 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3241 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3242
3243 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3244 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3245 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3246 "fpext source and destination must both be a vector or neither", &I);
3247 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3248
3249  visitInstruction(I);
3250}
3251
3252void Verifier::visitUIToFPInst(UIToFPInst &I) {
3253 // Get the source and destination types
3254 Type *SrcTy = I.getOperand(0)->getType();
3255 Type *DestTy = I.getType();
3256
3257 bool SrcVec = SrcTy->isVectorTy();
3258 bool DstVec = DestTy->isVectorTy();
3259
3260 Check(SrcVec == DstVec,
3261 "UIToFP source and dest must both be vector or scalar", &I);
3262 Check(SrcTy->isIntOrIntVectorTy(),
3263 "UIToFP source must be integer or integer vector", &I);
3264 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3265 &I);
3266
3267 if (SrcVec && DstVec)
3268 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3269 cast<VectorType>(DestTy)->getElementCount(),
3270 "UIToFP source and dest vector length mismatch", &I);
3271
3272  visitInstruction(I);
3273}
3274
3275void Verifier::visitSIToFPInst(SIToFPInst &I) {
3276 // Get the source and destination types
3277 Type *SrcTy = I.getOperand(0)->getType();
3278 Type *DestTy = I.getType();
3279
3280 bool SrcVec = SrcTy->isVectorTy();
3281 bool DstVec = DestTy->isVectorTy();
3282
3283 Check(SrcVec == DstVec,
3284 "SIToFP source and dest must both be vector or scalar", &I);
3285 Check(SrcTy->isIntOrIntVectorTy(),
3286 "SIToFP source must be integer or integer vector", &I);
3287 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3288 &I);
3289
3290 if (SrcVec && DstVec)
3291 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3292 cast<VectorType>(DestTy)->getElementCount(),
3293 "SIToFP source and dest vector length mismatch", &I);
3294
3295  visitInstruction(I);
3296}
3297
3298void Verifier::visitFPToUIInst(FPToUIInst &I) {
3299 // Get the source and destination types
3300 Type *SrcTy = I.getOperand(0)->getType();
3301 Type *DestTy = I.getType();
3302
3303 bool SrcVec = SrcTy->isVectorTy();
3304 bool DstVec = DestTy->isVectorTy();
3305
3306 Check(SrcVec == DstVec,
3307 "FPToUI source and dest must both be vector or scalar", &I);
3308 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3309 Check(DestTy->isIntOrIntVectorTy(),
3310 "FPToUI result must be integer or integer vector", &I);
3311
3312 if (SrcVec && DstVec)
3313 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3314 cast<VectorType>(DestTy)->getElementCount(),
3315 "FPToUI source and dest vector length mismatch", &I);
3316
3317  visitInstruction(I);
3318}
3319
3320void Verifier::visitFPToSIInst(FPToSIInst &I) {
3321 // Get the source and destination types
3322 Type *SrcTy = I.getOperand(0)->getType();
3323 Type *DestTy = I.getType();
3324
3325 bool SrcVec = SrcTy->isVectorTy();
3326 bool DstVec = DestTy->isVectorTy();
3327
3328 Check(SrcVec == DstVec,
3329 "FPToSI source and dest must both be vector or scalar", &I);
3330 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3331 Check(DestTy->isIntOrIntVectorTy(),
3332 "FPToSI result must be integer or integer vector", &I);
3333
3334 if (SrcVec && DstVec)
3335 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3336 cast<VectorType>(DestTy)->getElementCount(),
3337 "FPToSI source and dest vector length mismatch", &I);
3338
3339  visitInstruction(I);
3340}
3341
3342void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3343 // Get the source and destination types
3344 Type *SrcTy = I.getOperand(0)->getType();
3345 Type *DestTy = I.getType();
3346
3347 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3348
3349 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3350 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3351 &I);
3352
3353 if (SrcTy->isVectorTy()) {
3354 auto *VSrc = cast<VectorType>(SrcTy);
3355 auto *VDest = cast<VectorType>(DestTy);
3356 Check(VSrc->getElementCount() == VDest->getElementCount(),
3357 "PtrToInt Vector width mismatch", &I);
3358 }
3359
3360  visitInstruction(I);
3361}
3362
3363void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3364 // Get the source and destination types
3365 Type *SrcTy = I.getOperand(0)->getType();
3366 Type *DestTy = I.getType();
3367
3368 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3369 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3370
3371 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3372 &I);
3373 if (SrcTy->isVectorTy()) {
3374 auto *VSrc = cast<VectorType>(SrcTy);
3375 auto *VDest = cast<VectorType>(DestTy);
3376 Check(VSrc->getElementCount() == VDest->getElementCount(),
3377 "IntToPtr Vector width mismatch", &I);
3378 }
3379  visitInstruction(I);
3380}
3381
3382void Verifier::visitBitCastInst(BitCastInst &I) {
3383 Check(
3384 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3385 "Invalid bitcast", &I);
3386  visitInstruction(I);
3387}
3388
3389void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3390 Type *SrcTy = I.getOperand(0)->getType();
3391 Type *DestTy = I.getType();
3392
3393 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3394 &I);
3395 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3396 &I);
3397  Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
3398        "AddrSpaceCast must be between different address spaces", &I);
3399 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3400 Check(SrcVTy->getElementCount() ==
3401 cast<VectorType>(DestTy)->getElementCount(),
3402 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3403  visitInstruction(I);
3404}
3405
3406/// visitPHINode - Ensure that a PHI node is well formed.
3407///
3408void Verifier::visitPHINode(PHINode &PN) {
3409 // Ensure that the PHI nodes are all grouped together at the top of the block.
3410 // This can be tested by checking whether the instruction before this is
3411 // either nonexistent (because this is begin()) or is a PHI node. If not,
3412 // then there is some other instruction before a PHI.
3413 Check(&PN == &PN.getParent()->front() ||
3414 isa<PHINode>(--BasicBlock::iterator(&PN)),
3415 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3416
3417 // Check that a PHI doesn't yield a Token.
3418 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3419
3420 // Check that all of the values of the PHI node have the same type as the
3421 // result.
3422 for (Value *IncValue : PN.incoming_values()) {
3423 Check(PN.getType() == IncValue->getType(),
3424 "PHI node operands are not the same type as the result!", &PN);
3425 }
3426
3427 // All other PHI node constraints are checked in the visitBasicBlock method.
3428
3429 visitInstruction(PN);
3430}
3431
3432void Verifier::visitCallBase(CallBase &Call) {
3433 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3434 "Called function must be a pointer!", Call);
3435 FunctionType *FTy = Call.getFunctionType();
3436
3437 // Verify that the correct number of arguments are being passed
3438 if (FTy->isVarArg())
3439 Check(Call.arg_size() >= FTy->getNumParams(),
3440 "Called function requires more parameters than were provided!", Call);
3441 else
3442 Check(Call.arg_size() == FTy->getNumParams(),
3443 "Incorrect number of arguments passed to called function!", Call);
3444
3445 // Verify that all arguments to the call match the function type.
3446 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3447 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3448 "Call parameter type does not match function signature!",
3449 Call.getArgOperand(i), FTy->getParamType(i), Call);
3450
3451 AttributeList Attrs = Call.getAttributes();
3452
3453 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3454 "Attribute after last parameter!", Call);
3455
3456 Function *Callee =
3457 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3458 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3459 if (IsIntrinsic)
3460 Check(Callee->getValueType() == FTy,
3461 "Intrinsic called with incompatible signature", Call);
3462
3463 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3464 // convention.
3465 auto CC = Call.getCallingConv();
3466  Check(CC != CallingConv::AMDGPU_CS_Chain &&
3467            CC != CallingConv::AMDGPU_CS_ChainPreserve,
3468        "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3469 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3470 Call);
3471
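  // The helper below checks that by-value argument and return types of
  // non-intrinsic calls do not require more ABI alignment than
  // ParamMaxAlignment permits.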
3472 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3473 if (!Ty->isSized())
3474 return;
3475 Align ABIAlign = DL.getABITypeAlign(Ty);
3476 Align MaxAlign(ParamMaxAlignment);
3477 Check(ABIAlign <= MaxAlign,
3478 "Incorrect alignment of " + Message + " to called function!", Call);
3479 };
3480
3481 if (!IsIntrinsic) {
3482 VerifyTypeAlign(FTy->getReturnType(), "return type");
3483 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3484 Type *Ty = FTy->getParamType(i);
3485 VerifyTypeAlign(Ty, "argument passed");
3486 }
3487 }
3488
3489 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3490 // Don't allow speculatable on call sites, unless the underlying function
3491 // declaration is also speculatable.
3492 Check(Callee && Callee->isSpeculatable(),
3493 "speculatable attribute may not apply to call sites", Call);
3494 }
3495
3496 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3497 Check(Call.getCalledFunction()->getIntrinsicID() ==
3498 Intrinsic::call_preallocated_arg,
3499 "preallocated as a call site attribute can only be on "
3500 "llvm.call.preallocated.arg");
3501 }
3502
3503 // Verify call attributes.
3504 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3505
3506 // Conservatively check the inalloca argument.
3507 // We have a bug if we can find that there is an underlying alloca without
3508 // inalloca.
3509 if (Call.hasInAllocaArgument()) {
3510 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3511 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3512 Check(AI->isUsedWithInAlloca(),
3513 "inalloca argument for call has mismatched alloca", AI, Call);
3514 }
3515
3516 // For each argument of the callsite, if it has the swifterror argument,
3517 // make sure the underlying alloca/parameter it comes from has a swifterror as
3518 // well.
3519 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3520 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3521 Value *SwiftErrorArg = Call.getArgOperand(i);
3522 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3523 Check(AI->isSwiftError(),
3524 "swifterror argument for call has mismatched alloca", AI, Call);
3525 continue;
3526 }
3527 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3528 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3529 SwiftErrorArg, Call);
3530 Check(ArgI->hasSwiftErrorAttr(),
3531 "swifterror argument for call has mismatched parameter", ArgI,
3532 Call);
3533 }
3534
3535 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3536 // Don't allow immarg on call sites, unless the underlying declaration
3537 // also has the matching immarg.
3538 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3539 "immarg may not apply only to call sites", Call.getArgOperand(i),
3540 Call);
3541 }
3542
3543 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3544 Value *ArgVal = Call.getArgOperand(i);
3545 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3546 "immarg operand has non-immediate parameter", ArgVal, Call);
3547 }
3548
3549 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3550 Value *ArgVal = Call.getArgOperand(i);
3551 bool hasOB =
3552 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3553 bool isMustTail = Call.isMustTailCall();
3554 Check(hasOB != isMustTail,
3555 "preallocated operand either requires a preallocated bundle or "
3556 "the call to be musttail (but not both)",
3557 ArgVal, Call);
3558 }
3559 }
3560
3561 if (FTy->isVarArg()) {
3562 // FIXME? is 'nest' even legal here?
3563 bool SawNest = false;
3564 bool SawReturned = false;
3565
3566 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3567 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3568 SawNest = true;
3569 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3570 SawReturned = true;
3571 }
3572
3573 // Check attributes on the varargs part.
3574 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3575 Type *Ty = Call.getArgOperand(Idx)->getType();
3576 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3577 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3578
3579 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3580 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3581 SawNest = true;
3582 }
3583
3584 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3585 Check(!SawReturned, "More than one parameter has attribute returned!",
3586 Call);
3587 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3588 "Incompatible argument and return types for 'returned' "
3589 "attribute",
3590 Call);
3591 SawReturned = true;
3592 }
3593
3594 // Statepoint intrinsic is vararg but the wrapped function may be not.
3595 // Allow sret here and check the wrapped function in verifyStatepoint.
3596 if (!Call.getCalledFunction() ||
3597 Call.getCalledFunction()->getIntrinsicID() !=
3598 Intrinsic::experimental_gc_statepoint)
3599 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3600 "Attribute 'sret' cannot be used for vararg call arguments!",
3601 Call);
3602
3603 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3604 Check(Idx == Call.arg_size() - 1,
3605 "inalloca isn't on the last argument!", Call);
3606 }
3607 }
3608
3609 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3610 if (!IsIntrinsic) {
3611 for (Type *ParamTy : FTy->params()) {
3612 Check(!ParamTy->isMetadataTy(),
3613 "Function has metadata parameter but isn't an intrinsic", Call);
3614 Check(!ParamTy->isTokenTy(),
3615 "Function has token parameter but isn't an intrinsic", Call);
3616 }
3617 }
3618
3619 // Verify that indirect calls don't return tokens.
3620 if (!Call.getCalledFunction()) {
3621 Check(!FTy->getReturnType()->isTokenTy(),
3622 "Return type cannot be token for indirect call!");
3623 Check(!FTy->getReturnType()->isX86_AMXTy(),
3624 "Return type cannot be x86_amx for indirect call!");
3625 }
3626
3627 if (Function *F = Call.getCalledFunction())
3628 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3629 visitIntrinsicCall(ID, Call);
3630
3631  // Verify that a callsite has at most one operand bundle of each kind checked
3632  // below: "deopt", "funclet", "gc-transition", "cfguardtarget", "preallocated",
3633  // "gc-live", "ptrauth", "kcfi", and "clang.arc.attachedcall".
3634 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3635 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3636 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3637 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3638 FoundAttachedCallBundle = false;
3639 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3640 OperandBundleUse BU = Call.getOperandBundleAt(i);
3641 uint32_t Tag = BU.getTagID();
3642 if (Tag == LLVMContext::OB_deopt) {
3643 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3644 FoundDeoptBundle = true;
3645 } else if (Tag == LLVMContext::OB_gc_transition) {
3646 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3647 Call);
3648 FoundGCTransitionBundle = true;
3649 } else if (Tag == LLVMContext::OB_funclet) {
3650 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3651 FoundFuncletBundle = true;
3652 Check(BU.Inputs.size() == 1,
3653 "Expected exactly one funclet bundle operand", Call);
3654 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3655 "Funclet bundle operands should correspond to a FuncletPadInst",
3656 Call);
3657 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3658 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3659 Call);
3660 FoundCFGuardTargetBundle = true;
3661 Check(BU.Inputs.size() == 1,
3662 "Expected exactly one cfguardtarget bundle operand", Call);
3663 } else if (Tag == LLVMContext::OB_ptrauth) {
3664 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3665 FoundPtrauthBundle = true;
3666 Check(BU.Inputs.size() == 2,
3667 "Expected exactly two ptrauth bundle operands", Call);
3668 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3669 BU.Inputs[0]->getType()->isIntegerTy(32),
3670 "Ptrauth bundle key operand must be an i32 constant", Call);
3671 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3672 "Ptrauth bundle discriminator operand must be an i64", Call);
3673 } else if (Tag == LLVMContext::OB_kcfi) {
3674 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3675 FoundKCFIBundle = true;
3676 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3677 Call);
3678 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3679 BU.Inputs[0]->getType()->isIntegerTy(32),
3680 "Kcfi bundle operand must be an i32 constant", Call);
3681 } else if (Tag == LLVMContext::OB_preallocated) {
3682 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3683 Call);
3684 FoundPreallocatedBundle = true;
3685 Check(BU.Inputs.size() == 1,
3686 "Expected exactly one preallocated bundle operand", Call);
3687 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3688 Check(Input &&
3689 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3690 "\"preallocated\" argument must be a token from "
3691 "llvm.call.preallocated.setup",
3692 Call);
3693 } else if (Tag == LLVMContext::OB_gc_live) {
3694 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3695 FoundGCLiveBundle = true;
3696    } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3697      Check(!FoundAttachedCallBundle,
3698 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3699 FoundAttachedCallBundle = true;
3700 verifyAttachedCallBundle(Call, BU);
3701 }
3702 }
3703
3704 // Verify that callee and callsite agree on whether to use pointer auth.
3705 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3706 "Direct call cannot have a ptrauth bundle", Call);
3707
3708 // Verify that each inlinable callsite of a debug-info-bearing function in a
3709 // debug-info-bearing function has a debug location attached to it. Failure to
3710  // do so causes assertion failures when the inliner sets up inline scope info.
3711  // (Interposable functions are not inlinable, nor are functions without
3712  // definitions.)
3713 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3714 !Call.getCalledFunction()->isInterposable() &&
3715 !Call.getCalledFunction()->isDeclaration() &&
3716 Call.getCalledFunction()->getSubprogram())
3717 CheckDI(Call.getDebugLoc(),
3718 "inlinable function call in a function with "
3719 "debug info must have a !dbg location",
3720 Call);
3721
3722 if (Call.isInlineAsm())
3723 verifyInlineAsmCall(Call);
3724
3725 ConvergenceVerifyHelper.visit(Call);
3726
3727 visitInstruction(Call);
3728}
3729
3730void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3731 StringRef Context) {
3732 Check(!Attrs.contains(Attribute::InAlloca),
3733 Twine("inalloca attribute not allowed in ") + Context);
3734 Check(!Attrs.contains(Attribute::InReg),
3735 Twine("inreg attribute not allowed in ") + Context);
3736 Check(!Attrs.contains(Attribute::SwiftError),
3737 Twine("swifterror attribute not allowed in ") + Context);
3738 Check(!Attrs.contains(Attribute::Preallocated),
3739 Twine("preallocated attribute not allowed in ") + Context);
3740 Check(!Attrs.contains(Attribute::ByRef),
3741 Twine("byref attribute not allowed in ") + Context);
3742}
3743
3744/// Two types are "congruent" if they are identical, or if they are both pointer
3745/// types with different pointee types and the same address space.
3746static bool isTypeCongruent(Type *L, Type *R) {
3747 if (L == R)
3748 return true;
3749 PointerType *PL = dyn_cast<PointerType>(L);
3750 PointerType *PR = dyn_cast<PointerType>(R);
3751 if (!PL || !PR)
3752 return false;
3753 return PL->getAddressSpace() == PR->getAddressSpace();
3754}
3755
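/// Collect the ABI-affecting attributes of parameter \p I from \p Attrs so
/// that the caller's and callee's parameter attributes can be compared when
/// verifying musttail calls.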
3756static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
3757  static const Attribute::AttrKind ABIAttrs[] = {
3758 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3759 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3760 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3761 Attribute::ByRef};
3762 AttrBuilder Copy(C);
3763 for (auto AK : ABIAttrs) {
3764 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3765 if (Attr.isValid())
3766 Copy.addAttribute(Attr);
3767 }
3768
3769 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3770 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3771 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3772 Attrs.hasParamAttr(I, Attribute::ByRef)))
3773 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3774 return Copy;
3775}
3776
3777void Verifier::verifyMustTailCall(CallInst &CI) {
3778 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3779
3780 Function *F = CI.getParent()->getParent();
3781 FunctionType *CallerTy = F->getFunctionType();
3782 FunctionType *CalleeTy = CI.getFunctionType();
3783 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3784 "cannot guarantee tail call due to mismatched varargs", &CI);
3785 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3786 "cannot guarantee tail call due to mismatched return types", &CI);
3787
3788 // - The calling conventions of the caller and callee must match.
3789 Check(F->getCallingConv() == CI.getCallingConv(),
3790 "cannot guarantee tail call due to mismatched calling conv", &CI);
3791
3792 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3793 // or a pointer bitcast followed by a ret instruction.
3794 // - The ret instruction must return the (possibly bitcasted) value
3795 // produced by the call or void.
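  //   For example (illustrative IR only), a well-formed sequence is:
  //     %v = musttail call i32 @callee(i32 %x)
  //     ret i32 %v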
3796 Value *RetVal = &CI;
3797 Instruction *Next = CI.getNextNode();
3798
3799 // Handle the optional bitcast.
3800 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3801 Check(BI->getOperand(0) == RetVal,
3802 "bitcast following musttail call must use the call", BI);
3803 RetVal = BI;
3804 Next = BI->getNextNode();
3805 }
3806
3807 // Check the return.
3808 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3809 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3810 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3811 isa<UndefValue>(Ret->getReturnValue()),
3812 "musttail call result must be returned", Ret);
3813
3814 AttributeList CallerAttrs = F->getAttributes();
3815 AttributeList CalleeAttrs = CI.getAttributes();
3816  if (CI.getCallingConv() == CallingConv::SwiftTail ||
3817      CI.getCallingConv() == CallingConv::Tail) {
3818    StringRef CCName =
3819 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3820
3821 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3822 // are allowed in swifttailcc call
3823 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3824 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3825 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3826 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3827 }
3828 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3829 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3830 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3831 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3832 }
3833 // - Varargs functions are not allowed
3834 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3835 " tail call for varargs function");
3836 return;
3837 }
3838
3839 // - The caller and callee prototypes must match. Pointer types of
3840 // parameters or return types may differ in pointee type, but not
3841 // address space.
3842 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3843 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3844 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3845 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3846 Check(
3847 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3848 "cannot guarantee tail call due to mismatched parameter types", &CI);
3849 }
3850 }
3851
3852 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3853 // returned, preallocated, and inalloca, must match.
3854 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3855 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3856 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3857 Check(CallerABIAttrs == CalleeABIAttrs,
3858 "cannot guarantee tail call due to mismatched ABI impacting "
3859 "function attributes",
3860 &CI, CI.getOperand(I));
3861 }
3862}
3863
3864void Verifier::visitCallInst(CallInst &CI) {
3865 visitCallBase(CI);
3866
3867 if (CI.isMustTailCall())
3868 verifyMustTailCall(CI);
3869}
3870
3871void Verifier::visitInvokeInst(InvokeInst &II) {
3872 visitCallBase(II);
3873
3874 // Verify that the first non-PHI instruction of the unwind destination is an
3875 // exception handling instruction.
3876 Check(
3877 II.getUnwindDest()->isEHPad(),
3878 "The unwind destination does not have an exception handling instruction!",
3879 &II);
3880
3881 visitTerminator(II);
3882}
3883
3884/// visitUnaryOperator - Check the argument to the unary operator.
3885///
3886void Verifier::visitUnaryOperator(UnaryOperator &U) {
3887 Check(U.getType() == U.getOperand(0)->getType(),
3888        "Unary operators must have same type for "
3889 "operands and result!",
3890 &U);
3891
3892 switch (U.getOpcode()) {
3893 // Check that floating-point arithmetic operators are only used with
3894 // floating-point operands.
3895 case Instruction::FNeg:
3896 Check(U.getType()->isFPOrFPVectorTy(),
3897 "FNeg operator only works with float types!", &U);
3898 break;
3899 default:
3900 llvm_unreachable("Unknown UnaryOperator opcode!");
3901 }
3902
3903  visitInstruction(U);
3904}
3905
3906/// visitBinaryOperator - Check that both arguments to the binary operator are
3907/// of the same type!
3908///
3909void Verifier::visitBinaryOperator(BinaryOperator &B) {
3910 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3911 "Both operands to a binary operator are not of the same type!", &B);
3912
3913 switch (B.getOpcode()) {
3914 // Check that integer arithmetic operators are only used with
3915 // integral operands.
3916 case Instruction::Add:
3917 case Instruction::Sub:
3918 case Instruction::Mul:
3919 case Instruction::SDiv:
3920 case Instruction::UDiv:
3921 case Instruction::SRem:
3922 case Instruction::URem:
3923 Check(B.getType()->isIntOrIntVectorTy(),
3924 "Integer arithmetic operators only work with integral types!", &B);
3925 Check(B.getType() == B.getOperand(0)->getType(),
3926 "Integer arithmetic operators must have same type "
3927 "for operands and result!",
3928 &B);
3929 break;
3930 // Check that floating-point arithmetic operators are only used with
3931 // floating-point operands.
3932 case Instruction::FAdd:
3933 case Instruction::FSub:
3934 case Instruction::FMul:
3935 case Instruction::FDiv:
3936 case Instruction::FRem:
3937 Check(B.getType()->isFPOrFPVectorTy(),
3938 "Floating-point arithmetic operators only work with "
3939 "floating-point types!",
3940 &B);
3941 Check(B.getType() == B.getOperand(0)->getType(),
3942 "Floating-point arithmetic operators must have same type "
3943 "for operands and result!",
3944 &B);
3945 break;
3946 // Check that logical operators are only used with integral operands.
3947 case Instruction::And:
3948 case Instruction::Or:
3949 case Instruction::Xor:
3950 Check(B.getType()->isIntOrIntVectorTy(),
3951 "Logical operators only work with integral types!", &B);
3952 Check(B.getType() == B.getOperand(0)->getType(),
3953 "Logical operators must have same type for operands and result!", &B);
3954 break;
3955 case Instruction::Shl:
3956 case Instruction::LShr:
3957 case Instruction::AShr:
3958 Check(B.getType()->isIntOrIntVectorTy(),
3959 "Shifts only work with integral types!", &B);
3960 Check(B.getType() == B.getOperand(0)->getType(),
3961 "Shift return type must be same as operands!", &B);
3962 break;
3963 default:
3964 llvm_unreachable("Unknown BinaryOperator opcode!");
3965 }
3966
3967  visitInstruction(B);
3968}
3969
3970void Verifier::visitICmpInst(ICmpInst &IC) {
3971 // Check that the operands are the same type
3972 Type *Op0Ty = IC.getOperand(0)->getType();
3973 Type *Op1Ty = IC.getOperand(1)->getType();
3974 Check(Op0Ty == Op1Ty,
3975 "Both operands to ICmp instruction are not of the same type!", &IC);
3976 // Check that the operands are the right type
3977 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3978 "Invalid operand types for ICmp instruction", &IC);
3979 // Check that the predicate is valid.
3980 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
3981
3982 visitInstruction(IC);
3983}
3984
3985void Verifier::visitFCmpInst(FCmpInst &FC) {
3986 // Check that the operands are the same type
3987 Type *Op0Ty = FC.getOperand(0)->getType();
3988 Type *Op1Ty = FC.getOperand(1)->getType();
3989 Check(Op0Ty == Op1Ty,
3990 "Both operands to FCmp instruction are not of the same type!", &FC);
3991 // Check that the operands are the right type
3992 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
3993 &FC);
3994 // Check that the predicate is valid.
3995 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
3996
3997 visitInstruction(FC);
3998}
3999
4000void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4001  Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
4002        "Invalid extractelement operands!", &EI);
4003 visitInstruction(EI);
4004}
4005
4006void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4007 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4008 IE.getOperand(2)),
4009 "Invalid insertelement operands!", &IE);
4010 visitInstruction(IE);
4011}
4012
4013void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4014  Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4015                                           SV.getShuffleMask()),
4016 "Invalid shufflevector operands!", &SV);
4017 visitInstruction(SV);
4018}
4019
4020void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4021 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4022
4023 Check(isa<PointerType>(TargetTy),
4024        "GEP base pointer is not a pointer or a vector of pointers", &GEP);
4025 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4026
4027 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4028 SmallPtrSet<Type *, 4> Visited;
4029 Check(!STy->containsScalableVectorType(&Visited),
4030          "getelementptr cannot target structure that contains scalable vector "
4031 "type",
4032 &GEP);
4033 }
4034
4035 SmallVector<Value *, 16> Idxs(GEP.indices());
4036 Check(
4037 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4038 "GEP indexes must be integers", &GEP);
4039 Type *ElTy =
4040 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4041 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4042
4043 Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4044 GEP.getResultElementType() == ElTy,
4045 "GEP is not of right type for indices!", &GEP, ElTy);
4046
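  // Illustrative example: a vector GEP such as
  //   getelementptr i32, <4 x ptr> %ptrs, <4 x i64> %offsets
  // must produce a <4 x ptr> result, and any vector index must have the same
  // element count as that result; the checks below enforce this.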
4047 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4048 // Additional checks for vector GEPs.
4049 ElementCount GEPWidth = GEPVTy->getElementCount();
4050 if (GEP.getPointerOperandType()->isVectorTy())
4051 Check(
4052 GEPWidth ==
4053 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4054 "Vector GEP result width doesn't match operand's", &GEP);
4055 for (Value *Idx : Idxs) {
4056 Type *IndexTy = Idx->getType();
4057 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4058 ElementCount IndexWidth = IndexVTy->getElementCount();
4059 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4060 }
4061 Check(IndexTy->isIntOrIntVectorTy(),
4062 "All GEP indices should be of integer type");
4063 }
4064 }
4065
4066 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4067 Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4068 "GEP address space doesn't match type", &GEP);
4069 }
4070
4071  visitInstruction(GEP);
4072}
4073
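/// Return true if the two ranges share an endpoint, i.e. laid side by side
/// they would form one contiguous range.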
4074static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4075 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4076}
4077
4078/// Verify !range and !absolute_symbol metadata. These have the same
4079/// restrictions, except !absolute_symbol allows the full set.
4080void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4081 Type *Ty, bool IsAbsoluteSymbol) {
4082 unsigned NumOperands = Range->getNumOperands();
4083 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4084 unsigned NumRanges = NumOperands / 2;
4085 Check(NumRanges >= 1, "It should have at least one range!", Range);
4086
4087 ConstantRange LastRange(1, true); // Dummy initial value
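  // Each (Low, High) pair denotes the half-open range [Low, High). The checks
  // below require the pairs to be listed in order, to not overlap, and to not
  // be contiguous with one another (contiguous ranges should be merged).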
4088 for (unsigned i = 0; i < NumRanges; ++i) {
4089 ConstantInt *Low =
4090 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4091 Check(Low, "The lower limit must be an integer!", Low);
4092 ConstantInt *High =
4093 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4094 Check(High, "The upper limit must be an integer!", High);
4095 Check(High->getType() == Low->getType() &&
4096 High->getType() == Ty->getScalarType(),
4097 "Range types must match instruction type!", &I);
4098
4099 APInt HighV = High->getValue();
4100 APInt LowV = Low->getValue();
4101
4102 // ConstantRange asserts if the ranges are the same except for the min/max
4103 // value. Leave the cases it tolerates for the empty range error below.
4104 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4105 "The upper and lower limits cannot be the same value", &I);
4106
4107 ConstantRange CurRange(LowV, HighV);
4108 Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4109 "Range must not be empty!", Range);
4110 if (i != 0) {
4111 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4112 "Intervals are overlapping", Range);
4113 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4114 Range);
4115 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4116 Range);
4117 }
4118 LastRange = ConstantRange(LowV, HighV);
4119 }
4120 if (NumRanges > 2) {
4121 APInt FirstLow =
4122 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4123 APInt FirstHigh =
4124 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4125 ConstantRange FirstRange(FirstLow, FirstHigh);
4126 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4127 "Intervals are overlapping", Range);
4128 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4129 Range);
4130 }
4131}
4132
4133void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4134 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4135 "precondition violation");
4136 verifyRangeMetadata(I, Range, Ty, false);
4137}
4138
4139void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4140 unsigned Size = DL.getTypeSizeInBits(Ty);
4141 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4142 Check(!(Size & (Size - 1)),
4143 "atomic memory access' operand must have a power-of-two size", Ty, I);
4144}
4145
4146void Verifier::visitLoadInst(LoadInst &LI) {
4147 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4148 Check(PTy, "Load operand must be a pointer.", &LI);
4149 Type *ElTy = LI.getType();
4150 if (MaybeAlign A = LI.getAlign()) {
4151 Check(A->value() <= Value::MaximumAlignment,
4152 "huge alignment values are unsupported", &LI);
4153 }
4154 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4155 if (LI.isAtomic()) {
4156    Check(LI.getOrdering() != AtomicOrdering::Release &&
4157              LI.getOrdering() != AtomicOrdering::AcquireRelease,
4158          "Load cannot have Release ordering", &LI);
4159 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4160 "atomic load operand must have integer, pointer, or floating point "
4161 "type!",
4162 ElTy, &LI);
4163 checkAtomicMemAccessSize(ElTy, &LI);
4164 } else {
4165    Check(LI.getSyncScopeID() == SyncScope::System,
4166          "Non-atomic load cannot have SynchronizationScope specified", &LI);
4167 }
4168
4169 visitInstruction(LI);
4170}
4171
4172void Verifier::visitStoreInst(StoreInst &SI) {
4173 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4174 Check(PTy, "Store operand must be a pointer.", &SI);
4175 Type *ElTy = SI.getOperand(0)->getType();
4176 if (MaybeAlign A = SI.getAlign()) {
4177 Check(A->value() <= Value::MaximumAlignment,
4178 "huge alignment values are unsupported", &SI);
4179 }
4180 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4181 if (SI.isAtomic()) {
4182 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4183 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4184 "Store cannot have Acquire ordering", &SI);
4185 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4186 "atomic store operand must have integer, pointer, or floating point "
4187 "type!",
4188 ElTy, &SI);
4189 checkAtomicMemAccessSize(ElTy, &SI);
4190 } else {
4191 Check(SI.getSyncScopeID() == SyncScope::System,
4192 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4193 }
4194 visitInstruction(SI);
4195}
4196
4197/// Check that SwiftErrorVal is used as a swifterror argument in Call.
4198void Verifier::verifySwiftErrorCall(CallBase &Call,
4199 const Value *SwiftErrorVal) {
4200 for (const auto &I : llvm::enumerate(Call.args())) {
4201 if (I.value() == SwiftErrorVal) {
4202 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4203 "swifterror value when used in a callsite should be marked "
4204 "with swifterror attribute",
4205 SwiftErrorVal, Call);
4206 }
4207 }
4208}
4209
4210void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4211 // Check that swifterror value is only used by loads, stores, or as
4212 // a swifterror argument.
4213 for (const User *U : SwiftErrorVal->users()) {
4214 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4215 isa<InvokeInst>(U),
4216 "swifterror value can only be loaded and stored from, or "
4217 "as a swifterror argument!",
4218 SwiftErrorVal, U);
4219 // If it is used by a store, check it is the second operand.
4220 if (auto StoreI = dyn_cast<StoreInst>(U))
4221 Check(StoreI->getOperand(1) == SwiftErrorVal,
4222 "swifterror value should be the second operand when used "
4223 "by stores",
4224 SwiftErrorVal, U);
4225 if (auto *Call = dyn_cast<CallBase>(U))
4226 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4227 }
4228}
4229
4230void Verifier::visitAllocaInst(AllocaInst &AI) {
4231 SmallPtrSet<Type*, 4> Visited;
4232 Check(AI.getAllocatedType()->isSized(&Visited),
4233 "Cannot allocate unsized type", &AI);
4234  Check(AI.getArraySize()->getType()->isIntegerTy(),
4235        "Alloca array size must have integer type", &AI);
4236 if (MaybeAlign A = AI.getAlign()) {
4237 Check(A->value() <= Value::MaximumAlignment,
4238 "huge alignment values are unsupported", &AI);
4239 }
4240
4241 if (AI.isSwiftError()) {
4242    Check(AI.getAllocatedType()->isPointerTy(),
4243          "swifterror alloca must have pointer type", &AI);
4244    Check(!AI.isArrayAllocation(),
4245          "swifterror alloca must not be array allocation", &AI);
4246 verifySwiftErrorValue(&AI);
4247 }
4248
4249 visitInstruction(AI);
4250}
4251
4252void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4253 Type *ElTy = CXI.getOperand(1)->getType();
4254 Check(ElTy->isIntOrPtrTy(),
4255 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4256 checkAtomicMemAccessSize(ElTy, &CXI);
4257 visitInstruction(CXI);
4258}
4259
4260void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4261  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4262        "atomicrmw instructions cannot be unordered.", &RMWI);
4263 auto Op = RMWI.getOperation();
4264 Type *ElTy = RMWI.getOperand(1)->getType();
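  // xchg accepts integer, floating-point, and pointer operands; floating-point
  // operations require a floating-point scalar or fixed-width floating-point
  // vector; all other operations are integer-only.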
4265 if (Op == AtomicRMWInst::Xchg) {
4266 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4267 ElTy->isPointerTy(),
4268 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4269 " operand must have integer or floating point type!",
4270 &RMWI, ElTy);
4271 } else if (AtomicRMWInst::isFPOperation(Op)) {
4272 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4273 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4274 " operand must have floating-point or fixed vector of floating-point "
4275 "type!",
4276 &RMWI, ElTy);
4277 } else {
4278 Check(ElTy->isIntegerTy(),
4279 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4280 " operand must have integer type!",
4281 &RMWI, ElTy);
4282 }
4283 checkAtomicMemAccessSize(ElTy, &RMWI);
4284  Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
4285        "Invalid binary operation!", &RMWI);
4286 visitInstruction(RMWI);
4287}
4288
4289void Verifier::visitFenceInst(FenceInst &FI) {
4290 const AtomicOrdering Ordering = FI.getOrdering();
4291 Check(Ordering == AtomicOrdering::Acquire ||
4292 Ordering == AtomicOrdering::Release ||
4293 Ordering == AtomicOrdering::AcquireRelease ||
4294            Ordering == AtomicOrdering::SequentiallyConsistent,
4295        "fence instructions may only have acquire, release, acq_rel, or "
4296 "seq_cst ordering.",
4297 &FI);
4298 visitInstruction(FI);
4299}
4300
4301void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4302  Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4303                                         EVI.getIndices()) == EVI.getType(),
4304 "Invalid ExtractValueInst operands!", &EVI);
4305
4306 visitInstruction(EVI);
4307}
4308
4309void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4310  Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4311                                         IVI.getIndices()) ==
4312 IVI.getOperand(1)->getType(),
4313 "Invalid InsertValueInst operands!", &IVI);
4314
4315 visitInstruction(IVI);
4316}
4317
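/// Return the parent pad token of an EH pad, which is either a funclet pad
/// (catchpad/cleanuppad) or a catchswitch.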
4318static Value *getParentPad(Value *EHPad) {
4319 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4320 return FPI->getParentPad();
4321
4322 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4323}
4324
4325void Verifier::visitEHPadPredecessors(Instruction &I) {
4326 assert(I.isEHPad());
4327
4328 BasicBlock *BB = I.getParent();
4329 Function *F = BB->getParent();
4330
4331 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4332
4333 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4334 // The landingpad instruction defines its parent as a landing pad block. The
4335 // landing pad block may be branched to only by the unwind edge of an
4336 // invoke.
4337 for (BasicBlock *PredBB : predecessors(BB)) {
4338 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4339 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4340 "Block containing LandingPadInst must be jumped to "
4341 "only by the unwind edge of an invoke.",
4342 LPI);
4343 }
4344 return;
4345 }
4346 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4347 if (!pred_empty(BB))
4348 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4349          "Block containing CatchPadInst must be jumped to "
4350 "only by its catchswitch.",
4351 CPI);
4352 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4353 "Catchswitch cannot unwind to one of its catchpads",
4354 CPI->getCatchSwitch(), CPI);
4355 return;
4356 }
4357
4358 // Verify that each pred has a legal terminator with a legal to/from EH
4359 // pad relationship.
4360 Instruction *ToPad = &I;
4361 Value *ToPadParent = getParentPad(ToPad);
4362 for (BasicBlock *PredBB : predecessors(BB)) {
4363 Instruction *TI = PredBB->getTerminator();
4364 Value *FromPad;
4365 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4366 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4367 "EH pad must be jumped to via an unwind edge", ToPad, II);
4368 auto *CalledFn =
4369 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4370 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4371 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4372 continue;
4373 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4374 FromPad = Bundle->Inputs[0];
4375 else
4376 FromPad = ConstantTokenNone::get(II->getContext());
4377 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4378 FromPad = CRI->getOperand(0);
4379 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4380 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4381 FromPad = CSI;
4382 } else {
4383 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4384 }
4385
4386 // The edge may exit from zero or more nested pads.
4387    SmallPtrSet<Value *, 8> Seen;
4388    for (;; FromPad = getParentPad(FromPad)) {
4389 Check(FromPad != ToPad,
4390 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4391 if (FromPad == ToPadParent) {
4392 // This is a legal unwind edge.
4393 break;
4394 }
4395 Check(!isa<ConstantTokenNone>(FromPad),
4396 "A single unwind edge may only enter one EH pad", TI);
4397 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4398 FromPad);
4399
4400 // This will be diagnosed on the corresponding instruction already. We
4401 // need the extra check here to make sure getParentPad() works.
4402 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4403 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4404 }
4405 }
4406}
4407
4408void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4409 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4410 // isn't a cleanup.
4411 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4412 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4413
4414 visitEHPadPredecessors(LPI);
4415
4416 if (!LandingPadResultTy)
4417 LandingPadResultTy = LPI.getType();
4418 else
4419 Check(LandingPadResultTy == LPI.getType(),
4420 "The landingpad instruction should have a consistent result type "
4421 "inside a function.",
4422 &LPI);
4423
4424 Function *F = LPI.getParent()->getParent();
4425 Check(F->hasPersonalityFn(),
4426 "LandingPadInst needs to be in a function with a personality.", &LPI);
4427
4428 // The landingpad instruction must be the first non-PHI instruction in the
4429 // block.
4430 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4431 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4432
4433 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4434 Constant *Clause = LPI.getClause(i);
4435 if (LPI.isCatch(i)) {
4436 Check(isa<PointerType>(Clause->getType()),
4437 "Catch operand does not have pointer type!", &LPI);
4438 } else {
4439 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4440 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4441 "Filter operand is not an array of constants!", &LPI);
4442 }
4443 }
4444
4445 visitInstruction(LPI);
4446}
4447
4448void Verifier::visitResumeInst(ResumeInst &RI) {
4449  Check(RI.getFunction()->hasPersonalityFn(),
4450        "ResumeInst needs to be in a function with a personality.", &RI);
4451
4452 if (!LandingPadResultTy)
4453 LandingPadResultTy = RI.getValue()->getType();
4454 else
4455 Check(LandingPadResultTy == RI.getValue()->getType(),
4456 "The resume instruction should have a consistent result type "
4457 "inside a function.",
4458 &RI);
4459
4460 visitTerminator(RI);
4461}
4462
4463void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4464 BasicBlock *BB = CPI.getParent();
4465
4466 Function *F = BB->getParent();
4467 Check(F->hasPersonalityFn(),
4468 "CatchPadInst needs to be in a function with a personality.", &CPI);
4469
4470 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4471 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4472 CPI.getParentPad());
4473
4474 // The catchpad instruction must be the first non-PHI instruction in the
4475 // block.
4476 Check(BB->getFirstNonPHI() == &CPI,
4477 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4478
4479 visitEHPadPredecessors(CPI);
4480  visitFuncletPadInst(CPI);
4481}
4482
4483void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4484 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4485 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4486 CatchReturn.getOperand(0));
4487
4488 visitTerminator(CatchReturn);
4489}
4490
4491void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4492 BasicBlock *BB = CPI.getParent();
4493
4494 Function *F = BB->getParent();
4495 Check(F->hasPersonalityFn(),
4496 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4497
4498 // The cleanuppad instruction must be the first non-PHI instruction in the
4499 // block.
4500 Check(BB->getFirstNonPHI() == &CPI,
4501 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4502
4503 auto *ParentPad = CPI.getParentPad();
4504 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4505 "CleanupPadInst has an invalid parent.", &CPI);
4506
4507 visitEHPadPredecessors(CPI);
4508  visitFuncletPadInst(CPI);
4509}
4510
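/// Verify a funclet pad by walking its uses (including uses of nested pads):
/// every unwind edge that exits the pad must agree on a single unwind
/// destination, and when the pad's parent is a catchswitch that destination
/// must also match the catchswitch's own unwind destination.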
4511void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4512 User *FirstUser = nullptr;
4513 Value *FirstUnwindPad = nullptr;
4514 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4515  SmallPtrSet<FuncletPadInst *, 8> Seen;
4516
4517 while (!Worklist.empty()) {
4518 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4519 Check(Seen.insert(CurrentPad).second,
4520 "FuncletPadInst must not be nested within itself", CurrentPad);
4521 Value *UnresolvedAncestorPad = nullptr;
4522 for (User *U : CurrentPad->users()) {
4523 BasicBlock *UnwindDest;
4524 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4525 UnwindDest = CRI->getUnwindDest();
4526 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4527 // We allow catchswitch unwind to caller to nest
4528 // within an outer pad that unwinds somewhere else,
4529 // because catchswitch doesn't have a nounwind variant.
4530 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4531 if (CSI->unwindsToCaller())
4532 continue;
4533 UnwindDest = CSI->getUnwindDest();
4534 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4535 UnwindDest = II->getUnwindDest();
4536 } else if (isa<CallInst>(U)) {
4537 // Calls which don't unwind may be found inside funclet
4538 // pads that unwind somewhere else. We don't *require*
4539 // such calls to be annotated nounwind.
4540 continue;
4541 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4542 // The unwind dest for a cleanup can only be found by
4543 // recursive search. Add it to the worklist, and we'll
4544 // search for its first use that determines where it unwinds.
4545 Worklist.push_back(CPI);
4546 continue;
4547 } else {
4548 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4549 continue;
4550 }
4551
4552 Value *UnwindPad;
4553 bool ExitsFPI;
4554 if (UnwindDest) {
4555 UnwindPad = UnwindDest->getFirstNonPHI();
4556 if (!cast<Instruction>(UnwindPad)->isEHPad())
4557 continue;
4558 Value *UnwindParent = getParentPad(UnwindPad);
4559 // Ignore unwind edges that don't exit CurrentPad.
4560 if (UnwindParent == CurrentPad)
4561 continue;
4562 // Determine whether the original funclet pad is exited,
4563 // and if we are scanning nested pads determine how many
4564 // of them are exited so we can stop searching their
4565 // children.
4566 Value *ExitedPad = CurrentPad;
4567 ExitsFPI = false;
4568 do {
4569 if (ExitedPad == &FPI) {
4570 ExitsFPI = true;
4571 // Now we can resolve any ancestors of CurrentPad up to
4572 // FPI, but not including FPI since we need to make sure
4573 // to check all direct users of FPI for consistency.
4574 UnresolvedAncestorPad = &FPI;
4575 break;
4576 }
4577 Value *ExitedParent = getParentPad(ExitedPad);
4578 if (ExitedParent == UnwindParent) {
4579 // ExitedPad is the ancestor-most pad which this unwind
4580 // edge exits, so we can resolve up to it, meaning that
4581 // ExitedParent is the first ancestor still unresolved.
4582 UnresolvedAncestorPad = ExitedParent;
4583 break;
4584 }
4585 ExitedPad = ExitedParent;
4586 } while (!isa<ConstantTokenNone>(ExitedPad));
4587 } else {
4588 // Unwinding to caller exits all pads.
4589 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4590 ExitsFPI = true;
4591 UnresolvedAncestorPad = &FPI;
4592 }
4593
4594 if (ExitsFPI) {
4595 // This unwind edge exits FPI. Make sure it agrees with other
4596 // such edges.
4597 if (FirstUser) {
4598 Check(UnwindPad == FirstUnwindPad,
4599 "Unwind edges out of a funclet "
4600 "pad must have the same unwind "
4601 "dest",
4602 &FPI, U, FirstUser);
4603 } else {
4604 FirstUser = U;
4605 FirstUnwindPad = UnwindPad;
4606 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4607 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4608 getParentPad(UnwindPad) == getParentPad(&FPI))
4609 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4610 }
4611 }
4612 // Make sure we visit all uses of FPI, but for nested pads stop as
4613 // soon as we know where they unwind to.
4614 if (CurrentPad != &FPI)
4615 break;
4616 }
4617 if (UnresolvedAncestorPad) {
4618 if (CurrentPad == UnresolvedAncestorPad) {
4619 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4620 // we've found an unwind edge that exits it, because we need to verify
4621 // all direct uses of FPI.
4622 assert(CurrentPad == &FPI);
4623 continue;
4624 }
4625 // Pop off the worklist any nested pads that we've found an unwind
4626 // destination for. The pads on the worklist are the uncles,
4627 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4628 // for all ancestors of CurrentPad up to but not including
4629 // UnresolvedAncestorPad.
4630 Value *ResolvedPad = CurrentPad;
4631 while (!Worklist.empty()) {
4632 Value *UnclePad = Worklist.back();
4633 Value *AncestorPad = getParentPad(UnclePad);
4634 // Walk ResolvedPad up the ancestor list until we either find the
4635 // uncle's parent or the last resolved ancestor.
4636 while (ResolvedPad != AncestorPad) {
4637 Value *ResolvedParent = getParentPad(ResolvedPad);
4638 if (ResolvedParent == UnresolvedAncestorPad) {
4639 break;
4640 }
4641 ResolvedPad = ResolvedParent;
4642 }
4643 // If the resolved ancestor search didn't find the uncle's parent,
4644 // then the uncle is not yet resolved.
4645 if (ResolvedPad != AncestorPad)
4646 break;
4647 // This uncle is resolved, so pop it from the worklist.
4648 Worklist.pop_back();
4649 }
4650 }
4651 }
4652
4653 if (FirstUnwindPad) {
4654 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4655 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4656 Value *SwitchUnwindPad;
4657 if (SwitchUnwindDest)
4658 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4659 else
4660 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4661 Check(SwitchUnwindPad == FirstUnwindPad,
4662 "Unwind edges out of a catch must have the same unwind dest as "
4663 "the parent catchswitch",
4664 &FPI, FirstUser, CatchSwitch);
4665 }
4666 }
4667
4668 visitInstruction(FPI);
4669}
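// For illustration (a sketch with invented names, not code from this file),
// the rule enforced above: unwind edges leaving one funclet pad must agree.
//   pad:
//     %p = cleanuppad within none []
//     invoke void @g() to label %cont unwind label %sibling
//   exit:
//     cleanupret from %p unwind label %sibling  ; must match the invoke's dest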
4670
4671void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4672 BasicBlock *BB = CatchSwitch.getParent();
4673
4674 Function *F = BB->getParent();
4675 Check(F->hasPersonalityFn(),
4676 "CatchSwitchInst needs to be in a function with a personality.",
4677 &CatchSwitch);
4678
4679 // The catchswitch instruction must be the first non-PHI instruction in the
4680 // block.
4681 Check(BB->getFirstNonPHI() == &CatchSwitch,
4682 "CatchSwitchInst not the first non-PHI instruction in the block.",
4683 &CatchSwitch);
4684
4685 auto *ParentPad = CatchSwitch.getParentPad();
4686 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4687 "CatchSwitchInst has an invalid parent.", ParentPad);
4688
4689 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4690 Instruction *I = UnwindDest->getFirstNonPHI();
4691 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4692 "CatchSwitchInst must unwind to an EH block which is not a "
4693 "landingpad.",
4694 &CatchSwitch);
4695
4696 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4697 if (getParentPad(I) == ParentPad)
4698 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4699 }
4700
4701 Check(CatchSwitch.getNumHandlers() != 0,
4702 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4703
4704 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4705 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4706 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4707 }
4708
4709 visitEHPadPredecessors(CatchSwitch);
4710 visitTerminator(CatchSwitch);
4711}
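// For illustration (a sketch with invented names and personality-specific
// catchpad operands, not code from this file), a catchswitch satisfying the
// checks above: handlers are catchpads and the optional unwind destination
// is a non-landingpad EH block.
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [ptr null, i32 64, ptr null]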
4712
4713void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4714 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4715 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4716 CRI.getOperand(0));
4717
4718 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4719 Instruction *I = UnwindDest->getFirstNonPHI();
4720 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4721 "CleanupReturnInst must unwind to an EH block which is not a "
4722 "landingpad.",
4723 &CRI);
4724 }
4725
4726 visitTerminator(CRI);
4727}
4728
4729void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4730 Instruction *Op = cast<Instruction>(I.getOperand(i));
4731 // If we have an invalid invoke, don't try to compute the dominance.
4732 // We already reject it in the invoke specific checks and the dominance
4733 // computation doesn't handle multiple edges.
4734 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4735 if (II->getNormalDest() == II->getUnwindDest())
4736 return;
4737 }
4738
4739 // Quick check whether the def has already been encountered in the same block.
4740 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4741 // uses are defined to happen on the incoming edge, not at the instruction.
4742 //
4743 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4744 // wrapping an SSA value, assert that we've already encountered it. See
4745 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4746 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4747 return;
4748
4749 const Use &U = I.getOperandUse(i);
4750 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4751}
4752
4753void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4754 Check(I.getType()->isPointerTy(),
4755 "dereferenceable, dereferenceable_or_null "
4756 "apply only to pointer types",
4757 &I);
4758 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4759 "dereferenceable, dereferenceable_or_null apply only to load"
4760 " and inttoptr instructions, use attributes for calls or invokes",
4761 &I);
4762 Check(MD->getNumOperands() == 1,
4763 "dereferenceable, dereferenceable_or_null "
4764 "take one operand!",
4765 &I);
4766 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4767 Check(CI && CI->getType()->isIntegerTy(64),
4768 "dereferenceable, "
4769 "dereferenceable_or_null metadata value must be an i64!",
4770 &I);
4771}
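// For illustration (a sketch, values invented), metadata accepted by the
// checks above:
//   %p = load ptr, ptr %pp, !dereferenceable !0
//   !0 = !{i64 8}                        ; single i64 operand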
4772
4773void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4774 Check(MD->getNumOperands() >= 2,
4775 "!prof annotations should have no less than 2 operands", MD);
4776
4777 // Check first operand.
4778 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4779 Check(isa<MDString>(MD->getOperand(0)),
4780 "expected string with name of the !prof annotation", MD);
4781 MDString *MDS = cast<MDString>(MD->getOperand(0));
4782 StringRef ProfName = MDS->getString();
4783
4784 // Check consistency of !prof branch_weights metadata.
4785 if (ProfName.equals("branch_weights")) {
4786 if (isa<InvokeInst>(&I)) {
4787 Check(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
4788 "Wrong number of InvokeInst branch_weights operands", MD);
4789 } else {
4790 unsigned ExpectedNumOperands = 0;
4791 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4792 ExpectedNumOperands = BI->getNumSuccessors();
4793 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4794 ExpectedNumOperands = SI->getNumSuccessors();
4795 else if (isa<CallInst>(&I))
4796 ExpectedNumOperands = 1;
4797 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4798 ExpectedNumOperands = IBI->getNumDestinations();
4799 else if (isa<SelectInst>(&I))
4800 ExpectedNumOperands = 2;
4801 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4802 ExpectedNumOperands = CI->getNumSuccessors();
4803 else
4804 CheckFailed("!prof branch_weights are not allowed for this instruction",
4805 MD);
4806
4807 Check(MD->getNumOperands() == 1 + ExpectedNumOperands,
4808 "Wrong number of operands", MD);
4809 }
4810 for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4811 auto &MDO = MD->getOperand(i);
4812 Check(MDO, "second operand should not be null", MD);
4813 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4814 "!prof brunch_weights operand is not a const int");
4815 }
4816 }
4817}
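// For illustration (a sketch, weights invented), a branch_weights attachment
// that passes the checks above: one weight per successor plus the name.
//   br i1 %cond, label %taken, label %untaken, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 80}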
4818
4819void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4820 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4821 bool ExpectedInstTy =
4822 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4823 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4824 I, MD);
4825 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4826 // only be found as DbgAssignIntrinsic operands.
4827 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4828 for (auto *User : AsValue->users()) {
4829 CheckDI(isa<DbgAssignIntrinsic>(User),
4830 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4831 MD, User);
4832 // All of the dbg.assign intrinsics should be in the same function as I.
4833 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4834 CheckDI(DAI->getFunction() == I.getFunction(),
4835 "dbg.assign not in same function as inst", DAI, &I);
4836 }
4837 }
4838 for (DbgVariableRecord *DVR :
4839 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4840 CheckDI(DVR->isDbgAssign(),
4841 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4842 CheckDI(DVR->getFunction() == I.getFunction(),
4843 "DVRAssign not in same function as inst", DVR, &I);
4844 }
4845}
4846
4847void Verifier::visitCallStackMetadata(MDNode *MD) {
4848 // Call stack metadata should consist of a list of at least 1 constant int
4849 // (representing a hash of the location).
4850 Check(MD->getNumOperands() >= 1,
4851 "call stack metadata should have at least 1 operand", MD);
4852
4853 for (const auto &Op : MD->operands())
4854 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4855 "call stack metadata operand should be constant integer", Op);
4856}
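// For illustration (a sketch, values invented), a call stack node accepted
// by this check: a list of constant integer location hashes.
//   !0 = !{i64 123, i64 456}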
4857
4858void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4859 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4860 Check(MD->getNumOperands() >= 1,
4861 "!memprof annotations should have at least 1 metadata operand "
4862 "(MemInfoBlock)",
4863 MD);
4864
4865 // Check each MIB
4866 for (auto &MIBOp : MD->operands()) {
4867 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4868 // The first operand of an MIB should be the call stack metadata.
4869 // The rest of the operands should be MDString tags, and there should be
4870 // at least one.
4871 Check(MIB->getNumOperands() >= 2,
4872 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4873
4874 // Check call stack metadata (first operand).
4875 Check(MIB->getOperand(0) != nullptr,
4876 "!memprof MemInfoBlock first operand should not be null", MIB);
4877 Check(isa<MDNode>(MIB->getOperand(0)),
4878 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4879 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4880 visitCallStackMetadata(StackMD);
4881
4882 // Check that remaining operands are MDString.
4883 Check(llvm::all_of(llvm::drop_begin(MIB->operands()),
4884 [](const MDOperand &Op) { return isa<MDString>(Op); }),
4885 "Not all !memprof MemInfoBlock operands 1 to N are MDString", MIB);
4886 }
4887}
4888
4889void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4890 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4891 // Verify the partial callstack annotated from memprof profiles. This callsite
4892 // is a part of a profiled allocation callstack.
4893 visitCallStackMetadata(MD);
4894}
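// For illustration (a sketch, values invented), memprof/callsite attachments
// that pass the checks above:
//   %m = call ptr @malloc(i64 8), !memprof !0, !callsite !3
//   !0 = !{!1}                          ; list of MemInfoBlocks
//   !1 = !{!2, !"notcold"}              ; call stack node + MDString tag
//   !2 = !{i64 123, i64 456}
//   !3 = !{i64 123}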
4895
4896void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4897 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4898 Check(Annotation->getNumOperands() >= 1,
4899 "annotation must have at least one operand");
4900 for (const MDOperand &Op : Annotation->operands()) {
4901 bool TupleOfStrings =
4902 isa<MDTuple>(Op.get()) &&
4903 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4904 return isa<MDString>(Annotation.get());
4905 });
4906 Check(isa<MDString>(Op.get()) || TupleOfStrings,
4907 "operands must be a string or a tuple of strings");
4908 }
4909}
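// For illustration (a sketch), an annotation accepted above: operands are
// strings or tuples of strings.
//   store i32 0, ptr %p, !annotation !0
//   !0 = !{!"auto-init"}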
4910
4911void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
4912 unsigned NumOps = MD->getNumOperands();
4913 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4914 MD);
4915 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4916 "first scope operand must be self-referential or string", MD);
4917 if (NumOps == 3)
4918 Check(isa<MDString>(MD->getOperand(2)),
4919 "third scope operand must be string (if used)", MD);
4920
4921 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4922 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4923
4924 unsigned NumDomainOps = Domain->getNumOperands();
4925 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
4926 "domain must have one or two operands", Domain);
4927 Check(Domain->getOperand(0).get() == Domain ||
4928 isa<MDString>(Domain->getOperand(0)),
4929 "first domain operand must be self-referential or string", Domain);
4930 if (NumDomainOps == 2)
4931 Check(isa<MDString>(Domain->getOperand(1)),
4932 "second domain operand must be string (if used)", Domain);
4933}
4934
4935void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
4936 for (const MDOperand &Op : MD->operands()) {
4937 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4938 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
4939 visitAliasScopeMetadata(OpMD);
4940 }
4941}
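// For illustration (a sketch with invented names), the scope list layout
// checked above: each entry is a scope node whose second operand is a domain.
//   !0 = distinct !{!0, !"domain"}      ; domain: self-ref, optional name
//   !1 = distinct !{!1, !0, !"scope"}   ; scope: self-ref, domain, name
//   !2 = !{!1}                          ; the scope list itself
//   %v = load i32, ptr %p, !alias.scope !2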
4942
4943void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
4944 auto IsValidAccessScope = [](const MDNode *MD) {
4945 return MD->getNumOperands() == 0 && MD->isDistinct();
4946 };
4947
4948 // It must be either an access scope itself...
4949 if (IsValidAccessScope(MD))
4950 return;
4951
4952 // ...or a list of access scopes.
4953 for (const MDOperand &Op : MD->operands()) {
4954 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4955 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
4956 Check(IsValidAccessScope(OpMD),
4957 "Access scope list contains invalid access scope", MD);
4958 }
4959}
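// For illustration (a sketch), an access group accepted above: a distinct,
// empty MDNode, or a list of such nodes.
//   %v = load i32, ptr %p, !llvm.access.group !0
//   !0 = distinct !{}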
4960
4961/// verifyInstruction - Verify that an instruction is well formed.
4962///
4963void Verifier::visitInstruction(Instruction &I) {
4964 BasicBlock *BB = I.getParent();
4965 Check(BB, "Instruction not embedded in basic block!", &I);
4966
4967 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
4968 for (User *U : I.users()) {
4969 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
4970 "Only PHI nodes may reference their own value!", &I);
4971 }
4972 }
4973
4974 // Check that void typed values don't have names
4975 Check(!I.getType()->isVoidTy() || !I.hasName(),
4976 "Instruction has a name, but provides a void value!", &I);
4977
4978 // Check that the return value of the instruction is either void or a legal
4979 // value type.
4980 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4981 "Instruction returns a non-scalar type!", &I);
4982
4983 // Check that the instruction doesn't produce metadata. Calls are already
4984 // checked against the callee type.
4985 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4986 "Invalid use of metadata!", &I);
4987
4988 // Check that all uses of the instruction, if they are instructions
4989 // themselves, actually have parent basic blocks. If the use is not an
4990 // instruction, it is an error!
4991 for (Use &U : I.uses()) {
4992 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4993 Check(Used->getParent() != nullptr,
4994 "Instruction referencing"
4995 " instruction not embedded in a basic block!",
4996 &I, Used);
4997 else {
4998 CheckFailed("Use of instruction is not an instruction!", U);
4999 return;
5000 }
5001 }
5002
5003 // Get a pointer to the call base of the instruction if it is some form of
5004 // call.
5005 const CallBase *CBI = dyn_cast<CallBase>(&I);
5006
5007 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5008 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5009
5010 // Check to make sure that only first-class-values are operands to
5011 // instructions.
5012 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5013 Check(false, "Instruction operands must be first-class values!", &I);
5014 }
5015
5016 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5017 // This code checks whether the function is used as the operand of a
5018 // clang_arc_attachedcall operand bundle.
5019 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5020 int Idx) {
5021 return CBI && CBI->isOperandBundleOfType(
5022 LLVMContext::OB_clang_arc_attachedcall, Idx);
5023 };
5024
5025 // Check to make sure that the "address of" an intrinsic function is never
5026 // taken. Ignore cases where the address of the intrinsic function is used
5027 // as the argument of operand bundle "clang.arc.attachedcall" as those
5028 // cases are handled in verifyAttachedCallBundle.
5029 Check((!F->isIntrinsic() ||
5030 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5031 IsAttachedCallOperand(F, CBI, i)),
5032 "Cannot take the address of an intrinsic!", &I);
5033 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5034 F->getIntrinsicID() == Intrinsic::donothing ||
5035 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5036 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5037 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5038 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5039 F->getIntrinsicID() == Intrinsic::coro_resume ||
5040 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5041 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5042 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5043 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5044 F->getIntrinsicID() ==
5045 Intrinsic::experimental_patchpoint_void ||
5046 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5047 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5048 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5049 IsAttachedCallOperand(F, CBI, i),
5050 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5051 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5052 &I);
5053 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5054 &M, F, F->getParent());
5055 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5056 Check(OpBB->getParent() == BB->getParent(),
5057 "Referring to a basic block in another function!", &I);
5058 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5059 Check(OpArg->getParent() == BB->getParent(),
5060 "Referring to an argument in another function!", &I);
5061 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5062 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5063 &M, GV, GV->getParent());
5064 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5065 Check(OpInst->getFunction() == BB->getParent(),
5066 "Referring to an instruction in another function!", &I);
5067 verifyDominatesUse(I, i);
5068 } else if (isa<InlineAsm>(I.getOperand(i))) {
5069 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5070 "Cannot take the address of an inline asm!", &I);
5071 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5072 if (CE->getType()->isPtrOrPtrVectorTy()) {
5073 // If we have a ConstantExpr pointer, we need to see if it came from an
5074 // illegal bitcast.
5075 visitConstantExprsRecursively(CE);
5076 }
5077 }
5078 }
5079
5080 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5081 Check(I.getType()->isFPOrFPVectorTy(),
5082 "fpmath requires a floating point result!", &I);
5083 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5084 if (ConstantFP *CFP0 =
5085 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5086 const APFloat &Accuracy = CFP0->getValueAPF();
5087 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5088 "fpmath accuracy must have float type", &I);
5089 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5090 "fpmath accuracy not a positive number!", &I);
5091 } else {
5092 Check(false, "invalid fpmath accuracy!", &I);
5093 }
5094 }
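  // For illustration (a sketch, value invented), an fpmath attachment that
  // passes the checks above: a single positive IEEE-single accuracy.
  //   %r = fdiv float %a, %b, !fpmath !0
  //   !0 = !{float 2.500000e+00}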
5095
5096 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5097 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5098 "Ranges are only for loads, calls and invokes!", &I);
5099 visitRangeMetadata(I, Range, I.getType());
5100 }
5101
5102 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5103 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5104 "invariant.group metadata is only for loads and stores", &I);
5105 }
5106
5107 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5108 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5109 &I);
5110 Check(isa<LoadInst>(I),
5111 "nonnull applies only to load instructions, use attributes"
5112 " for calls or invokes",
5113 &I);
5114 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5115 }
5116
5117 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5118 visitDereferenceableMetadata(I, MD);
5119
5120 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5121 visitDereferenceableMetadata(I, MD);
5122
5123 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5124 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5125
5126 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa_struct))
5127 TBAAVerifyHelper.visitTBAAStructMetadata(I, TBAA);
5128
5129 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5130 visitAliasScopeListMetadata(MD);
5131 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5132 visitAliasScopeListMetadata(MD);
5133
5134 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5135 visitAccessGroupMetadata(MD);
5136
5137 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5138 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5139 &I);
5140 Check(isa<LoadInst>(I),
5141 "align applies only to load instructions, "
5142 "use attributes for calls or invokes",
5143 &I);
5144 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5145 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5146 Check(CI && CI->getType()->isIntegerTy(64),
5147 "align metadata value must be an i64!", &I);
5148 uint64_t Align = CI->getZExtValue();
5149 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5150 &I);
5152 "alignment is larger that implementation defined limit", &I);
5153 }
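  // For illustration (a sketch, value invented), an align attachment that
  // passes the checks above: a single power-of-two i64 on a pointer load.
  //   %p = load ptr, ptr %pp, !align !0
  //   !0 = !{i64 16}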
5154
5155 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5156 visitProfMetadata(I, MD);
5157
5158 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5159 visitMemProfMetadata(I, MD);
5160
5161 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5162 visitCallsiteMetadata(I, MD);
5163
5164 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5165 visitDIAssignIDMetadata(I, MD);
5166
5167 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5168 visitAnnotationMetadata(Annotation);
5169
5170 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5171 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5172 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5173 }
5174
5175 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5176 verifyFragmentExpression(*DII);
5177 verifyNotEntryValue(*DII);
5178 }
5179
5180 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5181 I.getAllMetadata(MDs);
5182 for (auto Attachment : MDs) {
5183 unsigned Kind = Attachment.first;
5184 auto AllowLocs =
5185 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5186 ? AreDebugLocsAllowed::Yes
5187 : AreDebugLocsAllowed::No;
5188 visitMDNode(*Attachment.second, AllowLocs);
5189 }
5190
5191 InstsInThisBlock.insert(&I);
5192}
5193
5194/// Allow intrinsics to be verified in different ways.
5195void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5196 Function *IF = Call.getCalledFunction();
5197 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5198 IF);
5199
5200 // Verify that the intrinsic prototype lines up with what the .td files
5201 // describe.
5202 FunctionType *IFTy = IF->getFunctionType();
5203 bool IsVarArg = IFTy->isVarArg();
5204
5205 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5206 getIntrinsicInfoTableEntries(ID, Table);
5207 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5208
5209 // Walk the descriptors to extract overloaded types.
5210 SmallVector<Type *, 4> ArgTys;
5211 Intrinsic::MatchIntrinsicTypesResult Res =
5212 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5213 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5214 "Intrinsic has incorrect return type!", IF);
5215 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5216 "Intrinsic has incorrect argument type!", IF);
5217
5218 // Verify if the intrinsic call matches the vararg property.
5219 if (IsVarArg)
5220 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5221 "Intrinsic was not defined with variable arguments!", IF);
5222 else
5223 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5224 "Callsite was not defined with variable arguments!", IF);
5225
5226 // All descriptors should be absorbed by now.
5227 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5228
5229 // Now that we have the intrinsic ID and the actual argument types (and we
5230 // know they are legal for the intrinsic!) get the intrinsic name through the
5231 // usual means. This allows us to verify the mangling of argument types into
5232 // the name.
5233 const std::string ExpectedName =
5234 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5235 Check(ExpectedName == IF->getName(),
5236 "Intrinsic name not mangled correctly for type arguments! "
5237 "Should be: " +
5238 ExpectedName,
5239 IF);
5240
5241 // If the intrinsic takes MDNode arguments, verify that they are either global
5242 // or are local to *this* function.
5243 for (Value *V : Call.args()) {
5244 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5245 visitMetadataAsValue(*MD, Call.getCaller());
5246 if (auto *Const = dyn_cast<Constant>(V))
5247 Check(!Const->getType()->isX86_AMXTy(),
5248 "const x86_amx is not allowed in argument!");
5249 }
5250
5251 switch (ID) {
5252 default:
5253 break;
5254 case Intrinsic::assume: {
5255 for (auto &Elem : Call.bundle_op_infos()) {
5256 unsigned ArgCount = Elem.End - Elem.Begin;
5257 // Separate storage assumptions are special insofar as they're the only
5258 // operand bundles allowed on assumes that aren't parameter attributes.
5259 if (Elem.Tag->getKey() == "separate_storage") {
5260 Check(ArgCount == 2,
5261 "separate_storage assumptions should have 2 arguments", Call);
5262 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5263 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5264 "arguments to separate_storage assumptions should be pointers",
5265 Call);
5266 return;
5267 }
5268 Check(Elem.Tag->getKey() == "ignore" ||
5269 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5270 "tags must be valid attribute names", Call);
5271 Attribute::AttrKind Kind =
5272 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5273 if (Kind == Attribute::Alignment) {
5274 Check(ArgCount <= 3 && ArgCount >= 2,
5275 "alignment assumptions should have 2 or 3 arguments", Call);
5276 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5277 "first argument should be a pointer", Call);
5278 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5279 "second argument should be an integer", Call);
5280 if (ArgCount == 3)
5281 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5282 "third argument should be an integer if present", Call);
5283 return;
5284 }
5285 Check(ArgCount <= 2, "too many arguments", Call);
5286 if (Kind == Attribute::None)
5287 break;
5288 if (Attribute::isIntAttrKind(Kind)) {
5289 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5290 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5291 "the second argument should be a constant integral value", Call);
5292 } else if (Attribute::canUseAsParamAttr(Kind)) {
5293 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5294 } else if (Attribute::canUseAsFnAttr(Kind)) {
5295 Check((ArgCount) == 0, "this attribute has no argument", Call);
5296 }
5297 }
5298 break;
5299 }
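  // For illustration (a sketch with invented names), assume bundles accepted
  // by the checks above:
  //   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 64)]
  //   call void @llvm.assume(i1 true) ["nonnull"(ptr %p)]
  //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]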
5300 case Intrinsic::ucmp:
5301 case Intrinsic::scmp: {
5302 Type *SrcTy = Call.getOperand(0)->getType();
5303 Type *DestTy = Call.getType();
5304
5305 Check(DestTy->getScalarSizeInBits() >= 2,
5306 "result type must be at least 2 bits wide", Call);
5307
5308 bool IsDestTypeVector = DestTy->isVectorTy();
5309 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5310 "ucmp/scmp argument and result types must both be either vector or "
5311 "scalar types",
5312 Call);
5313 if (IsDestTypeVector) {
5314 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5315 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5316 Check(SrcVecLen == DestVecLen,
5317 "return type and arguments must have the same number of "
5318 "elements",
5319 Call);
5320 }
5321 break;
5322 }
5323 case Intrinsic::coro_id: {
5324 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5325 if (isa<ConstantPointerNull>(InfoArg))
5326 break;
5327 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5328 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5329 "info argument of llvm.coro.id must refer to an initialized "
5330 "constant");
5331 Constant *Init = GV->getInitializer();
5332 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5333 "info argument of llvm.coro.id must refer to either a struct or "
5334 "an array");
5335 break;
5336 }
5337 case Intrinsic::is_fpclass: {
5338 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5339 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5340 "unsupported bits for llvm.is.fpclass test mask");
5341 break;
5342 }
5343 case Intrinsic::fptrunc_round: {
5344 // Check the rounding mode
5345 Metadata *MD = nullptr;
5346 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5347 if (MAV)
5348 MD = MAV->getMetadata();
5349
5350 Check(MD != nullptr, "missing rounding mode argument", Call);
5351
5352 Check(isa<MDString>(MD),
5353 ("invalid value for llvm.fptrunc.round metadata operand"
5354 " (the operand should be a string)"),
5355 MD);
5356
5357 std::optional<RoundingMode> RoundMode =
5358 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5359 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5360 "unsupported rounding mode argument", Call);
5361 break;
5362 }
5363#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5364#include "llvm/IR/VPIntrinsics.def"
5365 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5366 break;
5367#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5368 case Intrinsic::INTRINSIC:
5369#include "llvm/IR/ConstrainedOps.def"
5370 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5371 break;
5372 case Intrinsic::dbg_declare: // llvm.dbg.declare
5373 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5374 "invalid llvm.dbg.declare intrinsic call 1", Call);
5375 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5376 break;
5377 case Intrinsic::dbg_value: // llvm.dbg.value
5378 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5379 break;
5380 case Intrinsic::dbg_assign: // llvm.dbg.assign
5381 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5382 break;
5383 case Intrinsic::dbg_label: // llvm.dbg.label
5384 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5385 break;
5386 case Intrinsic::memcpy:
5387 case Intrinsic::memcpy_inline:
5388 case Intrinsic::memmove:
5389 case Intrinsic::memset:
5390 case Intrinsic::memset_inline: {
5391 break;
5392 }
5393 case Intrinsic::memcpy_element_unordered_atomic:
5394 case Intrinsic::memmove_element_unordered_atomic:
5395 case Intrinsic::memset_element_unordered_atomic: {
5396 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5397
5398 ConstantInt *ElementSizeCI =
5399 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5400 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5401 Check(ElementSizeVal.isPowerOf2(),
5402 "element size of the element-wise atomic memory intrinsic "
5403 "must be a power of 2",
5404 Call);
5405
5406 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5407 return Alignment && ElementSizeVal.ule(Alignment->value());
5408 };
5409 Check(IsValidAlignment(AMI->getDestAlign()),
5410 "incorrect alignment of the destination argument", Call);
5411 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5412 Check(IsValidAlignment(AMT->getSourceAlign()),
5413 "incorrect alignment of the source argument", Call);
5414 }
5415 break;
5416 }
5417 case Intrinsic::call_preallocated_setup: {
5418 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5419 Check(NumArgs != nullptr,
5420 "llvm.call.preallocated.setup argument must be a constant");
5421 bool FoundCall = false;
5422 for (User *U : Call.users()) {
5423 auto *UseCall = dyn_cast<CallBase>(U);
5424 Check(UseCall != nullptr,
5425 "Uses of llvm.call.preallocated.setup must be calls");
5426 const Function *Fn = UseCall->getCalledFunction();
5427 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5428 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5429 Check(AllocArgIndex != nullptr,
5430 "llvm.call.preallocated.alloc arg index must be a constant");
5431 auto AllocArgIndexInt = AllocArgIndex->getValue();
5432 Check(AllocArgIndexInt.sge(0) &&
5433 AllocArgIndexInt.slt(NumArgs->getValue()),
5434 "llvm.call.preallocated.alloc arg index must be between 0 and "
5435 "corresponding "
5436 "llvm.call.preallocated.setup's argument count");
5437 } else if (Fn && Fn->getIntrinsicID() ==
5438 Intrinsic::call_preallocated_teardown) {
5439 // nothing to do
5440 } else {
5441 Check(!FoundCall, "Can have at most one call corresponding to a "
5442 "llvm.call.preallocated.setup");
5443 FoundCall = true;
5444 size_t NumPreallocatedArgs = 0;
5445 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5446 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5447 ++NumPreallocatedArgs;
5448 }
5449 }
5450 Check(NumPreallocatedArgs != 0,
5451 "cannot use preallocated intrinsics on a call without "
5452 "preallocated arguments");
5453 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5454 "llvm.call.preallocated.setup arg size must be equal to number "
5455 "of preallocated arguments "
5456 "at call site",
5457 Call, *UseCall);
5458 // getOperandBundle() cannot be called if more than one of the operand
5459 // bundle exists. There is already a check elsewhere for this, so skip
5460 // here if we see more than one.
5461 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5462 1) {
5463 return;
5464 }
5465 auto PreallocatedBundle =
5466 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5467 Check(PreallocatedBundle,
5468 "Use of llvm.call.preallocated.setup outside intrinsics "
5469 "must be in \"preallocated\" operand bundle");
5470 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5471 "preallocated bundle must have token from corresponding "
5472 "llvm.call.preallocated.setup");
5473 }
5474 }
5475 break;
5476 }
5477 case Intrinsic::call_preallocated_arg: {
5478 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5479 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5480 Intrinsic::call_preallocated_setup,
5481 "llvm.call.preallocated.arg token argument must be a "
5482 "llvm.call.preallocated.setup");
5483 Check(Call.hasFnAttr(Attribute::Preallocated),
5484 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5485 "call site attribute");
5486 break;
5487 }
5488 case Intrinsic::call_preallocated_teardown: {
5489 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5490 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5491 Intrinsic::call_preallocated_setup,
5492 "llvm.call.preallocated.teardown token argument must be a "
5493 "llvm.call.preallocated.setup");
5494 break;
5495 }
5496 case Intrinsic::gcroot:
5497 case Intrinsic::gcwrite:
5498 case Intrinsic::gcread:
5499 if (ID == Intrinsic::gcroot) {
5500 AllocaInst *AI =
5501 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5502 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5503 Check(isa<Constant>(Call.getArgOperand(1)),
5504 "llvm.gcroot parameter #2 must be a constant.", Call);
5505 if (!AI->getAllocatedType()->isPointerTy()) {
5506 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5507 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5508 "or argument #2 must be a non-null constant.",
5509 Call);
5510 }
5511 }
5512
5513 Check(Call.getParent()->getParent()->hasGC(),
5514 "Enclosing function does not use GC.", Call);
5515 break;
5516 case Intrinsic::init_trampoline:
5517 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5518 "llvm.init_trampoline parameter #2 must resolve to a function.",
5519 Call);
5520 break;
5521 case Intrinsic::prefetch:
5522 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5523 "rw argument to llvm.prefetch must be 0-1", Call);
5524 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5525 "locality argument to llvm.prefetch must be 0-3", Call);
5526 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5527 "cache type argument to llvm.prefetch must be 0-1", Call);
5528 break;
5529 case Intrinsic::stackprotector:
5530 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5531 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5532 break;
5533 case Intrinsic::localescape: {
5534 BasicBlock *BB = Call.getParent();
5535 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5536 Call);
5537 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5538 Call);
5539 for (Value *Arg : Call.args()) {
5540 if (isa<ConstantPointerNull>(Arg))
5541 continue; // Null values are allowed as placeholders.
5542 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5543 Check(AI && AI->isStaticAlloca(),
5544 "llvm.localescape only accepts static allocas", Call);
5545 }
5546 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5547 SawFrameEscape = true;
5548 break;
5549 }
5550 case Intrinsic::localrecover: {
5551 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5552 Function *Fn = dyn_cast<Function>(FnArg);
5553 Check(Fn && !Fn->isDeclaration(),
5554 "llvm.localrecover first "
5555 "argument must be function defined in this module",
5556 Call);
5557 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5558 auto &Entry = FrameEscapeInfo[Fn];
5559 Entry.second = unsigned(
5560 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5561 break;
5562 }
5563
5564 case Intrinsic::experimental_gc_statepoint:
5565 if (auto *CI = dyn_cast<CallInst>(&Call))
5566 Check(!CI->isInlineAsm(),
5567 "gc.statepoint support for inline assembly unimplemented", CI);
5568 Check(Call.getParent()->getParent()->hasGC(),
5569 "Enclosing function does not use GC.", Call);
5570
5571 verifyStatepoint(Call);
5572 break;
5573 case Intrinsic::experimental_gc_result: {
5574 Check(Call.getParent()->getParent()->hasGC(),
5575 "Enclosing function does not use GC.", Call);
5576
5577 auto *Statepoint = Call.getArgOperand(0);
5578 if (isa<UndefValue>(Statepoint))
5579 break;
5580
5581 // Are we tied to a statepoint properly?
5582 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5583 const Function *StatepointFn =
5584 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5585 Check(StatepointFn && StatepointFn->isDeclaration() &&
5586 StatepointFn->getIntrinsicID() ==
5587 Intrinsic::experimental_gc_statepoint,
5588 "gc.result operand #1 must be from a statepoint", Call,
5589 Call.getArgOperand(0));
5590
5591 // Check that result type matches wrapped callee.
5592 auto *TargetFuncType =
5593 cast<FunctionType>(StatepointCall->getParamElementType(2));
5594 Check(Call.getType() == TargetFuncType->getReturnType(),
5595 "gc.result result type does not match wrapped callee", Call);
5596 break;
5597 }
5598 case Intrinsic::experimental_gc_relocate: {
5599 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5600
5601 Check(isa<PointerType>(Call.getType()->getScalarType()),
5602 "gc.relocate must return a pointer or a vector of pointers", Call);
5603
5604 // Check that this relocate is correctly tied to the statepoint
5605
5606 // This is case for relocate on the unwinding path of an invoke statepoint
5607 if (LandingPadInst *LandingPad =
5608 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5609
5610 const BasicBlock *InvokeBB =
5611 LandingPad->getParent()->getUniquePredecessor();
5612
5613 // Landingpad relocates should have only one predecessor with invoke
5614 // statepoint terminator
5615 Check(InvokeBB, "safepoints should have unique landingpads",
5616 LandingPad->getParent());
5617 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5618 InvokeBB);
5619 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5620 "gc relocate should be linked to a statepoint", InvokeBB);
5621 } else {
5622 // In all other cases relocate should be tied to the statepoint directly.
5623 // This covers relocates on a normal return path of invoke statepoint and
5624 // relocates of a call statepoint.
5625 auto *Token = Call.getArgOperand(0);
5626 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5627 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5628 }
5629
5630 // Verify rest of the relocate arguments.
5631 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5632
5633 // Both the base and derived must be piped through the safepoint.
5634 Value *Base = Call.getArgOperand(1);
5635 Check(isa<ConstantInt>(Base),
5636 "gc.relocate operand #2 must be integer offset", Call);
5637
5638 Value *Derived = Call.getArgOperand(2);
5639 Check(isa<ConstantInt>(Derived),
5640 "gc.relocate operand #3 must be integer offset", Call);
5641
5642 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5643 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5644
5645 // Check the bounds
5646 if (isa<UndefValue>(StatepointCall))
5647 break;
5648 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5649 .getOperandBundle(LLVMContext::OB_gc_live)) {
5650 Check(BaseIndex < Opt->Inputs.size(),
5651 "gc.relocate: statepoint base index out of bounds", Call);
5652 Check(DerivedIndex < Opt->Inputs.size(),
5653 "gc.relocate: statepoint derived index out of bounds", Call);
5654 }
5655
5656 // Relocated value must be either a pointer type or vector-of-pointer type,
5657 // but gc_relocate does not need to return the same pointer type as the
5658 // relocated pointer. It can be cast to the correct type later if it's
5659 // desired. However, they must have the same address space and 'vectorness'
5660 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5661 auto *ResultType = Call.getType();
5662 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5663 auto *BaseType = Relocate.getBasePtr()->getType();
5664
5665 Check(BaseType->isPtrOrPtrVectorTy(),
5666 "gc.relocate: relocated value must be a pointer", Call);
5667 Check(DerivedType->isPtrOrPtrVectorTy(),
5668 "gc.relocate: relocated value must be a pointer", Call);
5669
5670 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5671 "gc.relocate: vector relocates to vector and pointer to pointer",
5672 Call);
5673 Check(
5674 ResultType->getPointerAddressSpace() ==
5675 DerivedType->getPointerAddressSpace(),
5676 "gc.relocate: relocating a pointer shouldn't change its address space",
5677 Call);
5678
5679 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5680 Check(GC, "gc.relocate: calling function must have GCStrategy",
5681 Call.getFunction());
5682 if (GC) {
5683 auto isGCPtr = [&GC](Type *PTy) {
5684 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5685 };
5686 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5687 Check(isGCPtr(BaseType),
5688 "gc.relocate: relocated value must be a gc pointer", Call);
5689 Check(isGCPtr(DerivedType),
5690 "gc.relocate: relocated value must be a gc pointer", Call);
5691 }
5692 break;
5693 }
5694 case Intrinsic::experimental_patchpoint: {
5695 if (Call.getCallingConv() == CallingConv::AnyReg) {
5696 Check(Call.getType()->isSingleValueType(),
5697 "patchpoint: invalid return type used with anyregcc", Call);
5698 }
5699 break;
5700 }
5701 case Intrinsic::eh_exceptioncode:
5702 case Intrinsic::eh_exceptionpointer: {
5703 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5704 "eh.exceptionpointer argument must be a catchpad", Call);
5705 break;
5706 }
5707 case Intrinsic::get_active_lane_mask: {
5708 Check(Call.getType()->isVectorTy(),
5709 "get_active_lane_mask: must return a "
5710 "vector",
5711 Call);
5712 auto *ElemTy = Call.getType()->getScalarType();
5713 Check(ElemTy->isIntegerTy(1),
5714 "get_active_lane_mask: element type is not "
5715 "i1",
5716 Call);
5717 break;
5718 }
5719 case Intrinsic::experimental_get_vector_length: {
5720 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5721 Check(!VF->isNegative() && !VF->isZero(),
5722 "get_vector_length: VF must be positive", Call);
5723 break;
5724 }
5725 case Intrinsic::masked_load: {
5726 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5727 Call);
5728
5729 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5730 Value *Mask = Call.getArgOperand(2);
5731 Value *PassThru = Call.getArgOperand(3);
5732 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5733 Call);
5734 Check(Alignment->getValue().isPowerOf2(),
5735 "masked_load: alignment must be a power of 2", Call);
5736 Check(PassThru->getType() == Call.getType(),
5737 "masked_load: pass through and return type must match", Call);
5738 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5739 cast<VectorType>(Call.getType())->getElementCount(),
5740 "masked_load: vector mask must be same length as return", Call);
5741 break;
5742 }
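  // For illustration (a sketch with invented names), a masked load satisfying
  // the checks above: power-of-two alignment, matching mask and passthru.
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(
  //            ptr %p, i32 16, <4 x i1> %mask, <4 x i32> %passthru)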
5743 case Intrinsic::masked_store: {
5744 Value *Val = Call.getArgOperand(0);
5745 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5746 Value *Mask = Call.getArgOperand(3);
5747 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5748 Call);
5749 Check(Alignment->getValue().isPowerOf2(),
5750 "masked_store: alignment must be a power of 2", Call);
5751 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5752 cast<VectorType>(Val->getType())->getElementCount(),
5753 "masked_store: vector mask must be same length as value", Call);
5754 break;
5755 }
5756
5757 case Intrinsic::masked_gather: {
5758 const APInt &Alignment =
5759 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5760 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5761 "masked_gather: alignment must be 0 or a power of 2", Call);
5762 break;
5763 }
5764 case Intrinsic::masked_scatter: {
5765 const APInt &Alignment =
5766 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5767 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5768 "masked_scatter: alignment must be 0 or a power of 2", Call);
5769 break;
5770 }
5771
5772 case Intrinsic::experimental_guard: {
5773 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5774 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5775 "experimental_guard must have exactly one "
5776 "\"deopt\" operand bundle");
5777 break;
5778 }
5779
5780 case Intrinsic::experimental_deoptimize: {
5781 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5782 Call);
5783 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5784 "experimental_deoptimize must have exactly one "
5785 "\"deopt\" operand bundle");
5786 Check(Call.getType() == Call.getFunction()->getReturnType(),
5787 "experimental_deoptimize return type must match caller return type");
5788
5789 if (isa<CallInst>(Call)) {
5790 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5791 Check(RI,
5792 "calls to experimental_deoptimize must be followed by a return");
5793
5794 if (!Call.getType()->isVoidTy() && RI)
5795 Check(RI->getReturnValue() == &Call,
5796 "calls to experimental_deoptimize must be followed by a return "
5797 "of the value computed by experimental_deoptimize");
5798 }
5799
5800 break;
5801 }
5802 case Intrinsic::vastart: {
5803 Check(Call.getFunction()->isVarArg(),
5804 "va_start called in a non-varargs function");
5805 break;
5806 }
5807 case Intrinsic::vector_reduce_and:
5808 case Intrinsic::vector_reduce_or:
5809 case Intrinsic::vector_reduce_xor:
5810 case Intrinsic::vector_reduce_add:
5811 case Intrinsic::vector_reduce_mul:
5812 case Intrinsic::vector_reduce_smax:
5813 case Intrinsic::vector_reduce_smin:
5814 case Intrinsic::vector_reduce_umax:
5815 case Intrinsic::vector_reduce_umin: {
5816 Type *ArgTy = Call.getArgOperand(0)->getType();
5817 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5818 "Intrinsic has incorrect argument type!");
5819 break;
5820 }
5821 case Intrinsic::vector_reduce_fmax:
5822 case Intrinsic::vector_reduce_fmin: {
5823 Type *ArgTy = Call.getArgOperand(0)->getType();
5824 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5825 "Intrinsic has incorrect argument type!");
5826 break;
5827 }
5828 case Intrinsic::vector_reduce_fadd:
5829 case Intrinsic::vector_reduce_fmul: {
5830 // Unlike the other reductions, the first argument is a start value. The
5831 // second argument is the vector to be reduced.
5832 Type *ArgTy = Call.getArgOperand(1)->getType();
5833 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5834 "Intrinsic has incorrect argument type!");
5835 break;
5836 }
5837 case Intrinsic::smul_fix:
5838 case Intrinsic::smul_fix_sat:
5839 case Intrinsic::umul_fix:
5840 case Intrinsic::umul_fix_sat:
5841 case Intrinsic::sdiv_fix:
5842 case Intrinsic::sdiv_fix_sat:
5843 case Intrinsic::udiv_fix:
5844 case Intrinsic::udiv_fix_sat: {
5845 Value *Op1 = Call.getArgOperand(0);
5846 Value *Op2 = Call.getArgOperand(1);
5848 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5849 "vector of ints");
5851 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5852 "vector of ints");
5853
5854 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5855 Check(Op3->getType()->isIntegerTy(),
5856 "third operand of [us][mul|div]_fix[_sat] must be an int type");
5857 Check(Op3->getBitWidth() <= 32,
5858 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
5859
5860 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5861 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5862 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5863 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5864 "the operands");
5865 } else {
5866 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5867 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5868 "to the width of the operands");
5869 }
5870 break;
5871 }
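  // For illustration (a sketch with invented names), a fixed-point call that
  // satisfies the scale checks above: scale 15 is below the 32-bit width.
  //   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 15)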
5872 case Intrinsic::lrint:
5873 case Intrinsic::llrint: {
5874 Type *ValTy = Call.getArgOperand(0)->getType();
5875 Type *ResultTy = Call.getType();
5876 Check(
5877 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5878 "llvm.lrint, llvm.llrint: argument must be floating-point or vector "
5879 "of floating-points, and result must be integer or vector of integers",
5880 &Call);
5881 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
5882 "llvm.lrint, llvm.llrint: argument and result disagree on vector use",
5883 &Call);
5884 if (ValTy->isVectorTy()) {
5885 Check(cast<VectorType>(ValTy)->getElementCount() ==
5886 cast<VectorType>(ResultTy)->getElementCount(),
5887 "llvm.lrint, llvm.llrint: argument must be same length as result",
5888 &Call);
5889 }
5890 break;
5891 }
5892 case Intrinsic::lround:
5893 case Intrinsic::llround: {
5894 Type *ValTy = Call.getArgOperand(0)->getType();
5895 Type *ResultTy = Call.getType();
5896 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5897 "Intrinsic does not support vectors", &Call);
5898 break;
5899 }
5900 case Intrinsic::bswap: {
5901 Type *Ty = Call.getType();
5902 unsigned Size = Ty->getScalarSizeInBits();
5903 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5904 break;
5905 }
5906 case Intrinsic::invariant_start: {
5907 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5908 Check(InvariantSize &&
5909 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5910 "invariant_start parameter must be -1, 0 or a positive number",
5911 &Call);
5912 break;
5913 }
5914 case Intrinsic::matrix_multiply:
5915 case Intrinsic::matrix_transpose:
5916 case Intrinsic::matrix_column_major_load:
5917 case Intrinsic::matrix_column_major_store: {
5918 Function *IF = Call.getCalledFunction();
5919 ConstantInt *Stride = nullptr;
5920 ConstantInt *NumRows;
5921 ConstantInt *NumColumns;
5922 VectorType *ResultTy;
5923 Type *Op0ElemTy = nullptr;
5924 Type *Op1ElemTy = nullptr;
5925 switch (ID) {
5926 case Intrinsic::matrix_multiply: {
5927 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5928 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
5929 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5930 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
5931 ->getNumElements() ==
5932 NumRows->getZExtValue() * N->getZExtValue(),
5933 "First argument of a matrix operation does not match specified "
5934 "shape!");
5935 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
5936 ->getNumElements() ==
5937 N->getZExtValue() * NumColumns->getZExtValue(),
5938 "Second argument of a matrix operation does not match specified "
5939 "shape!");
5940
5941 ResultTy = cast<VectorType>(Call.getType());
5942 Op0ElemTy =
5943 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5944 Op1ElemTy =
5945 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
5946 break;
5947 }
5948 case Intrinsic::matrix_transpose:
5949 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
5950 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
5951 ResultTy = cast<VectorType>(Call.getType());
5952 Op0ElemTy =
5953 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5954 break;
5955 case Intrinsic::matrix_column_major_load: {
5956 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
5957 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
5958 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5959 ResultTy = cast<VectorType>(Call.getType());
5960 break;
5961 }
5962 case Intrinsic::matrix_column_major_store: {
5963 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
5964 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
5965 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
5966 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
5967 Op0ElemTy =
5968 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5969 break;
5970 }
5971 default:
5972 llvm_unreachable("unexpected intrinsic");
5973 }
5974
5975 Check(ResultTy->getElementType()->isIntegerTy() ||
5976 ResultTy->getElementType()->isFloatingPointTy(),
5977 "Result type must be an integer or floating-point type!", IF);
5978
5979 if (Op0ElemTy)
5980 Check(ResultTy->getElementType() == Op0ElemTy,
5981 "Vector element type mismatch of the result and first operand "
5982 "vector!",
5983 IF);
5984
5985 if (Op1ElemTy)
5986 Check(ResultTy->getElementType() == Op1ElemTy,
5987 "Vector element type mismatch of the result and second operand "
5988 "vector!",
5989 IF);
5990
5991 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
5992 NumRows->getZExtValue() * NumColumns->getZExtValue(),
5993 "Result of a matrix operation does not fit in the returned vector!");
5994
5995 if (Stride)
5996 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
5997 "Stride must be greater or equal than the number of rows!", IF);
5998
5999 break;
6000 }
6001 case Intrinsic::experimental_vector_splice: {
6002 VectorType *VecTy = cast<VectorType>(Call.getType());
6003 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6004 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6005 if (Call.getParent() && Call.getParent()->getParent()) {
6006 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6007 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6008 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6009 }
6010 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6011 (Idx >= 0 && Idx < KnownMinNumElements),
6012 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6013 "known minimum number of elements in the vector. For scalable "
6014 "vectors the minimum number of elements is determined from "
6015 "vscale_range.",
6016 &Call);
6017 break;
6018 }
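// Illustrative sketch (assumed IR, for exposition only): for a fixed <4 x i32>
// splice VL is 4, so any constant index in [-4, 3] passes the check above:
//   %v = call <4 x i32> @llvm.experimental.vector.splice.v4i32(
//            <4 x i32> %a, <4 x i32> %b, i32 -1)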
6019 case Intrinsic::experimental_stepvector: {
6020 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6021 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6022 VecTy->getScalarSizeInBits() >= 8,
6023 "experimental_stepvector only supported for vectors of integers "
6024 "with a bitwidth of at least 8.",
6025 &Call);
6026 break;
6027 }
6028 case Intrinsic::vector_insert: {
6029 Value *Vec = Call.getArgOperand(0);
6030 Value *SubVec = Call.getArgOperand(1);
6031 Value *Idx = Call.getArgOperand(2);
6032 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6033
6034 VectorType *VecTy = cast<VectorType>(Vec->getType());
6035 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6036
6037 ElementCount VecEC = VecTy->getElementCount();
6038 ElementCount SubVecEC = SubVecTy->getElementCount();
6039 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6040 "vector_insert parameters must have the same element "
6041 "type.",
6042 &Call);
6043 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6044 "vector_insert index must be a constant multiple of "
6045 "the subvector's known minimum vector length.");
6046
6047 // If this insertion is not the 'mixed' case where a fixed vector is
6048 // inserted into a scalable vector, ensure that the insertion of the
6049 // subvector does not overrun the parent vector.
6050 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6051 Check(IdxN < VecEC.getKnownMinValue() &&
6052 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6053 "subvector operand of vector_insert would overrun the "
6054 "vector being inserted into.");
6055 }
6056 break;
6057 }
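// Illustrative sketch (assumed IR, for exposition only): inserting a fixed
// <4 x i32> subvector into a scalable vector at index 0. The index is a
// multiple of the subvector's known minimum length (4), and since this is the
// mixed fixed-into-scalable case the overrun check above is skipped:
//   %v = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(
//            <vscale x 4 x i32> %vec, <4 x i32> %sub, i64 0)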
6058 case Intrinsic::vector_extract: {
6059 Value *Vec = Call.getArgOperand(0);
6060 Value *Idx = Call.getArgOperand(1);
6061 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6062
6063 VectorType *ResultTy = cast<VectorType>(Call.getType());
6064 VectorType *VecTy = cast<VectorType>(Vec->getType());
6065
6066 ElementCount VecEC = VecTy->getElementCount();
6067 ElementCount ResultEC = ResultTy->getElementCount();
6068
6069 Check(ResultTy->getElementType() == VecTy->getElementType(),
6070 "vector_extract result must have the same element "
6071 "type as the input vector.",
6072 &Call);
6073 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6074 "vector_extract index must be a constant multiple of "
6075 "the result type's known minimum vector length.");
6076
6077 // If this extraction is not the 'mixed' case where a fixed vector is
6078 // extracted from a scalable vector, ensure that the extraction does not
6079 // overrun the parent vector.
6080 if (VecEC.isScalable() == ResultEC.isScalable()) {
6081 Check(IdxN < VecEC.getKnownMinValue() &&
6082 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6083 "vector_extract would overrun.");
6084 }
6085 break;
6086 }
6087 case Intrinsic::experimental_noalias_scope_decl: {
6088 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6089 break;
6090 }
6091 case Intrinsic::preserve_array_access_index:
6092 case Intrinsic::preserve_struct_access_index:
6093 case Intrinsic::aarch64_ldaxr:
6094 case Intrinsic::aarch64_ldxr:
6095 case Intrinsic::arm_ldaex:
6096 case Intrinsic::arm_ldrex: {
6097 Type *ElemTy = Call.getParamElementType(0);
6098 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6099 &Call);
6100 break;
6101 }
6102 case Intrinsic::aarch64_stlxr:
6103 case Intrinsic::aarch64_stxr:
6104 case Intrinsic::arm_stlex:
6105 case Intrinsic::arm_strex: {
6106 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6107 Check(ElemTy,
6108 "Intrinsic requires elementtype attribute on second argument.",
6109 &Call);
6110 break;
6111 }
6112 case Intrinsic::aarch64_prefetch: {
6113 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6114 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6115 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6116 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6117 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6118 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6119 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6120 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6121 break;
6122 }
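// Illustrative sketch (assumed IR, for exposition only): a call whose
// immediate operands all stay within the ranges checked above
// (write=0, target=1, stream=0, isdata=1):
//   call void @llvm.aarch64.prefetch(ptr %p, i32 0, i32 1, i32 0, i32 1)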
6123 case Intrinsic::callbr_landingpad: {
6124 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6125 Check(CBR, "intrinstic requires callbr operand", &Call);
6126 if (!CBR)
6127 break;
6128
6129 const BasicBlock *LandingPadBB = Call.getParent();
6130 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6131 if (!PredBB) {
6132 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6133 break;
6134 }
6135 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6136 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6137 &Call);
6138 break;
6139 }
6140 Check(llvm::any_of(CBR->getIndirectDests(),
6141 [LandingPadBB](const BasicBlock *IndDest) {
6142 return IndDest == LandingPadBB;
6143 }),
6144 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6145 "block in indirect destination list",
6146 &Call);
6147 const Instruction &First = *LandingPadBB->begin();
6148 Check(&First == &Call, "No other instructions may proceed intrinsic",
6149 &Call);
6150 break;
6151 }
6152 case Intrinsic::amdgcn_cs_chain: {
6153 auto CallerCC = Call.getCaller()->getCallingConv();
6154 switch (CallerCC) {
6155 case CallingConv::AMDGPU_CS:
6156 case CallingConv::AMDGPU_CS_Chain:
6157 case CallingConv::AMDGPU_CS_ChainPreserve:
6158 break;
6159 default:
6160 CheckFailed("Intrinsic can only be used from functions with the "
6161 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6162 "calling conventions",
6163 &Call);
6164 break;
6165 }
6166
6167 Check(Call.paramHasAttr(2, Attribute::InReg),
6168 "SGPR arguments must have the `inreg` attribute", &Call);
6169 Check(!Call.paramHasAttr(3, Attribute::InReg),
6170 "VGPR arguments must not have the `inreg` attribute", &Call);
6171 break;
6172 }
6173 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6174 auto CallerCC = Call.getCaller()->getCallingConv();
6175 switch (CallerCC) {
6176 case CallingConv::AMDGPU_CS_Chain:
6177 case CallingConv::AMDGPU_CS_ChainPreserve:
6178 break;
6179 default:
6180 CheckFailed("Intrinsic can only be used from functions with the "
6181 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6182 "calling conventions",
6183 &Call);
6184 break;
6185 }
6186
6187 unsigned InactiveIdx = 1;
6188 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6189 "Value for inactive lanes must not have the `inreg` attribute",
6190 &Call);
6191 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6192 "Value for inactive lanes must be a function argument", &Call);
6193 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6194 "Value for inactive lanes must be a VGPR function argument", &Call);
6195 break;
6196 }
6197 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6198 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6199 Value *V = Call.getArgOperand(0);
6200 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6201 Check(RegCount % 8 == 0,
6202 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6203 Check((RegCount >= 24 && RegCount <= 256),
6204 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6205 break;
6206 }
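// Illustrative sketch (assumed IR, for exposition only): reg_count must be a
// multiple of 8 inside [24, 256], so 96 is accepted while 20 or 260 would be
// rejected by the checks above:
//   call void @llvm.nvvm.setmaxnreg.inc.sync.aligned.u32(i32 96)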
6207 case Intrinsic::experimental_convergence_entry:
6208 LLVM_FALLTHROUGH;
6209 case Intrinsic::experimental_convergence_anchor:
6210 break;
6211 case Intrinsic::experimental_convergence_loop:
6212 break;
6213 case Intrinsic::ptrmask: {
6214 Type *Ty0 = Call.getArgOperand(0)->getType();
6215 Type *Ty1 = Call.getArgOperand(1)->getType();
6217 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6218 "of pointers",
6219 &Call);
6220 Check(
6221 Ty0->isVectorTy() == Ty1->isVectorTy(),
6222 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6223 &Call);
6224 if (Ty0->isVectorTy())
6225 Check(cast<VectorType>(Ty0)->getElementCount() ==
6226 cast<VectorType>(Ty1)->getElementCount(),
6227 "llvm.ptrmask intrinsic arguments must have the same number of "
6228 "elements",
6229 &Call);
6230 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6231 "llvm.ptrmask intrinsic second argument bitwidth must match "
6232 "pointer index type size of first argument",
6233 &Call);
6234 break;
6235 }
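// Illustrative sketch (assumed IR and a DataLayout whose address space 0
// pointers have a 64-bit index size, for exposition only): the mask width
// must match the pointer index width of the first argument:
//   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -64)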
6236 case Intrinsic::threadlocal_address: {
6237 const Value &Arg0 = *Call.getArgOperand(0);
6238 Check(isa<GlobalValue>(Arg0),
6239 "llvm.threadlocal.address first argument must be a GlobalValue");
6240 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6241 "llvm.threadlocal.address operand isThreadLocal() must be true");
6242 break;
6243 }
6244 };
6245
6246 // Verify that there aren't any unmediated control transfers between funclets.
6247 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6248 Function *F = Call.getParent()->getParent();
6249 if (F->hasPersonalityFn() &&
6250 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6251 // Run EH funclet coloring on-demand and cache results for other intrinsic
6252 // calls in this function
6253 if (BlockEHFuncletColors.empty())
6254 BlockEHFuncletColors = colorEHFunclets(*F);
6255
6256 // Check for catch-/cleanup-pad in first funclet block
6257 bool InEHFunclet = false;
6258 BasicBlock *CallBB = Call.getParent();
6259 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6260 assert(CV.size() > 0 && "Uncolored block");
6261 for (BasicBlock *ColorFirstBB : CV)
6262 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6263 InEHFunclet = true;
6264
6265 // Check for funclet operand bundle
6266 bool HasToken = false;
6267 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6268 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6269 HasToken = true;
6270
6271 // This would cause silent code truncation in WinEHPrepare
6272 if (InEHFunclet)
6273 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6274 }
6275 }
6276}
6277
6278/// Carefully grab the subprogram from a local scope.
6279///
6280/// This carefully grabs the subprogram from a local scope, avoiding the
6281/// built-in assertions that would typically fire.
6282 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6283 if (!LocalScope)
6284 return nullptr;
6285
6286 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6287 return SP;
6288
6289 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6290 return getSubprogram(LB->getRawScope());
6291
6292 // Just return null; broken scope chains are checked elsewhere.
6293 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6294 return nullptr;
6295}
6296
6297void Verifier::visit(DbgLabelRecord &DLR) {
6298 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6299 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6300
6301 // Ignore broken !dbg attachments; they're checked elsewhere.
6302 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6303 if (!isa<DILocation>(N))
6304 return;
6305
6306 BasicBlock *BB = DLR.getParent();
6307 Function *F = BB ? BB->getParent() : nullptr;
6308
6309 // The scopes for variables and !dbg attachments must agree.
6310 DILabel *Label = DLR.getLabel();
6311 DILocation *Loc = DLR.getDebugLoc();
6312 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6313
6314 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6315 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6316 if (!LabelSP || !LocSP)
6317 return;
6318
6319 CheckDI(LabelSP == LocSP,
6320 "mismatched subprogram between #dbg_label label and !dbg attachment",
6321 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6322 Loc->getScope()->getSubprogram());
6323}
6324
6325void Verifier::visit(DbgVariableRecord &DVR) {
6326 BasicBlock *BB = DVR.getParent();
6327 Function *F = BB->getParent();
6328
6332 "invalid #dbg record type", &DVR, DVR.getType());
6333
6334 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6335 // DIArgList, or an empty MDNode (which is a legacy representation for an
6336 // "undef" location).
6337 auto *MD = DVR.getRawLocation();
6338 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6339 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6340 "invalid #dbg record address/value", &DVR, MD);
6341 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6342 visitValueAsMetadata(*VAM, F);
6343 else if (auto *AL = dyn_cast<DIArgList>(MD))
6344 visitDIArgList(*AL, F);
6345
6346 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6347 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6348 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6349
6350 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6351 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6352 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6353
6354 if (DVR.isDbgAssign()) {
6355 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6356 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6357 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6358 AreDebugLocsAllowed::No);
6359
6360 const auto *RawAddr = DVR.getRawAddress();
6361 // Similarly to the location above, the address for an assign
6362 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6363 // represents an undef address.
6364 CheckDI(
6365 isa<ValueAsMetadata>(RawAddr) ||
6366 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6367 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6368 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6369 visitValueAsMetadata(*VAM, F);
6370
6371 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6372 "invalid #dbg_assign address expression", &DVR,
6373 DVR.getRawAddressExpression());
6374 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6375
6376 // All of the linked instructions should be in the same function as DVR.
6377 for (Instruction *I : at::getAssignmentInsts(&DVR))
6378 CheckDI(DVR.getFunction() == I->getFunction(),
6379 "inst not in same function as #dbg_assign", I, &DVR);
6380 }
6381
6382 // This check is redundant with one in visitLocalVariable().
6383 DILocalVariable *Var = DVR.getVariable();
6384 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6385 Var->getRawType());
6386
6387 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6388 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6389 &DVR, DLNode);
6390 DILocation *Loc = DVR.getDebugLoc();
6391
6392 // The scopes for variables and !dbg attachments must agree.
6393 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6394 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6395 if (!VarSP || !LocSP)
6396 return; // Broken scope chains are checked elsewhere.
6397
6398 CheckDI(VarSP == LocSP,
6399 "mismatched subprogram between #dbg record variable and DILocation",
6400 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6401 Loc->getScope()->getSubprogram());
6402
6403 verifyFnArgs(DVR);
6404}
6405
6406void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6407 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6408 auto *RetTy = cast<VectorType>(VPCast->getType());
6409 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6410 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6411 "VP cast intrinsic first argument and result vector lengths must be "
6412 "equal",
6413 *VPCast);
6414
6415 switch (VPCast->getIntrinsicID()) {
6416 default:
6417 llvm_unreachable("Unknown VP cast intrinsic");
6418 case Intrinsic::vp_trunc:
6419 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6420 "llvm.vp.trunc intrinsic first argument and result element type "
6421 "must be integer",
6422 *VPCast);
6423 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6424 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6425 "larger than the bit size of the return type",
6426 *VPCast);
6427 break;
6428 case Intrinsic::vp_zext:
6429 case Intrinsic::vp_sext:
6430 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6431 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6432 "element type must be integer",
6433 *VPCast);
6434 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6435 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6436 "argument must be smaller than the bit size of the return type",
6437 *VPCast);
6438 break;
6439 case Intrinsic::vp_fptoui:
6440 case Intrinsic::vp_fptosi:
6441 case Intrinsic::vp_lrint:
6442 case Intrinsic::vp_llrint:
6443 Check(
6444 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6445 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6446 "type must be floating-point and result element type must be integer",
6447 *VPCast);
6448 break;
6449 case Intrinsic::vp_uitofp:
6450 case Intrinsic::vp_sitofp:
6451 Check(
6452 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6453 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6454 "type must be integer and result element type must be floating-point",
6455 *VPCast);
6456 break;
6457 case Intrinsic::vp_fptrunc:
6458 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6459 "llvm.vp.fptrunc intrinsic first argument and result element type "
6460 "must be floating-point",
6461 *VPCast);
6462 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6463 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6464 "larger than the bit size of the return type",
6465 *VPCast);
6466 break;
6467 case Intrinsic::vp_fpext:
6468 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6469 "llvm.vp.fpext intrinsic first argument and result element type "
6470 "must be floating-point",
6471 *VPCast);
6472 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6473 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6474 "smaller than the bit size of the return type",
6475 *VPCast);
6476 break;
6477 case Intrinsic::vp_ptrtoint:
6478 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6479 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6480 "pointer and result element type must be integer",
6481 *VPCast);
6482 break;
6483 case Intrinsic::vp_inttoptr:
6484 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6485 "llvm.vp.inttoptr intrinsic first argument element type must be "
6486 "integer and result element type must be pointer",
6487 *VPCast);
6488 break;
6489 }
6490 }
6491 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6492 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6494 "invalid predicate for VP FP comparison intrinsic", &VPI);
6495 }
6496 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6497 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6499 "invalid predicate for VP integer comparison intrinsic", &VPI);
6500 }
6501 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6502 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6503 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6504 "unsupported bits for llvm.vp.is.fpclass test mask");
6505 }
6506}
6507
6508void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6509 unsigned NumOperands;
6510 bool HasRoundingMD;
6511 switch (FPI.getIntrinsicID()) {
6512#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6513 case Intrinsic::INTRINSIC: \
6514 NumOperands = NARG; \
6515 HasRoundingMD = ROUND_MODE; \
6516 break;
6517#include "llvm/IR/ConstrainedOps.def"
6518 default:
6519 llvm_unreachable("Invalid constrained FP intrinsic!");
6520 }
6521 NumOperands += (1 + HasRoundingMD);
6522 // Compare intrinsics carry an extra predicate metadata operand.
6523 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6524 NumOperands += 1;
6525 Check((FPI.arg_size() == NumOperands),
6526 "invalid arguments for constrained FP intrinsic", &FPI);
6527
6528 switch (FPI.getIntrinsicID()) {
6529 case Intrinsic::experimental_constrained_lrint:
6530 case Intrinsic::experimental_constrained_llrint: {
6531 Type *ValTy = FPI.getArgOperand(0)->getType();
6532 Type *ResultTy = FPI.getType();
6533 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6534 "Intrinsic does not support vectors", &FPI);
6535 }
6536 break;
6537
6538 case Intrinsic::experimental_constrained_lround:
6539 case Intrinsic::experimental_constrained_llround: {
6540 Type *ValTy = FPI.getArgOperand(0)->getType();
6541 Type *ResultTy = FPI.getType();
6542 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6543 "Intrinsic does not support vectors", &FPI);
6544 break;
6545 }
6546
6547 case Intrinsic::experimental_constrained_fcmp:
6548 case Intrinsic::experimental_constrained_fcmps: {
6549 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6551 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6552 break;
6553 }
6554
6555 case Intrinsic::experimental_constrained_fptosi:
6556 case Intrinsic::experimental_constrained_fptoui: {
6557 Value *Operand = FPI.getArgOperand(0);
6558 ElementCount SrcEC;
6559 Check(Operand->getType()->isFPOrFPVectorTy(),
6560 "Intrinsic first argument must be floating point", &FPI);
6561 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6562 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6563 }
6564
6565 Operand = &FPI;
6566 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6567 "Intrinsic first argument and result disagree on vector use", &FPI);
6568 Check(Operand->getType()->isIntOrIntVectorTy(),
6569 "Intrinsic result must be an integer", &FPI);
6570 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6571 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6572 "Intrinsic first argument and result vector lengths must be equal",
6573 &FPI);
6574 }
6575 }
6576 break;
6577
6578 case Intrinsic::experimental_constrained_sitofp:
6579 case Intrinsic::experimental_constrained_uitofp: {
6580 Value *Operand = FPI.getArgOperand(0);
6581 ElementCount SrcEC;
6582 Check(Operand->getType()->isIntOrIntVectorTy(),
6583 "Intrinsic first argument must be integer", &FPI);
6584 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6585 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6586 }
6587
6588 Operand = &FPI;
6589 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6590 "Intrinsic first argument and result disagree on vector use", &FPI);
6591 Check(Operand->getType()->isFPOrFPVectorTy(),
6592 "Intrinsic result must be a floating point", &FPI);
6593 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6594 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6595 "Intrinsic first argument and result vector lengths must be equal",
6596 &FPI);
6597 }
6598 } break;
6599
6600 case Intrinsic::experimental_constrained_fptrunc:
6601 case Intrinsic::experimental_constrained_fpext: {
6602 Value *Operand = FPI.getArgOperand(0);
6603 Type *OperandTy = Operand->getType();
6604 Value *Result = &FPI;
6605 Type *ResultTy = Result->getType();
6606 Check(OperandTy->isFPOrFPVectorTy(),
6607 "Intrinsic first argument must be FP or FP vector", &FPI);
6608 Check(ResultTy->isFPOrFPVectorTy(),
6609 "Intrinsic result must be FP or FP vector", &FPI);
6610 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6611 "Intrinsic first argument and result disagree on vector use", &FPI);
6612 if (OperandTy->isVectorTy()) {
6613 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6614 cast<VectorType>(ResultTy)->getElementCount(),
6615 "Intrinsic first argument and result vector lengths must be equal",
6616 &FPI);
6617 }
6618 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6619 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6620 "Intrinsic first argument's type must be larger than result type",
6621 &FPI);
6622 } else {
6623 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6624 "Intrinsic first argument's type must be smaller than result type",
6625 &FPI);
6626 }
6627 }
6628 break;
6629
6630 default:
6631 break;
6632 }
6633
6634 // If a non-metadata argument is passed in a metadata slot then the
6635 // error will be caught earlier when the incorrect argument doesn't
6636 // match the specification in the intrinsic call table. Thus, no
6637 // argument type check is needed here.
6638
6639 Check(FPI.getExceptionBehavior().has_value(),
6640 "invalid exception behavior argument", &FPI);
6641 if (HasRoundingMD) {
6642 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6643 &FPI);
6644 }
6645}
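// Illustrative sketch (assumed IR, for exposition only): constrained fadd has
// two value operands plus exception-behavior metadata and, because it carries
// a rounding mode, one more metadata operand, i.e. four operands in total,
// matching the NumOperands computation above:
//   %r = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")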
6646
6647void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6648 auto *MD = DII.getRawLocation();
6649 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6650 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6651 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6652 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6653 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6654 DII.getRawVariable());
6655 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6656 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6657 DII.getRawExpression());
6658
6659 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6660 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6661 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6662 DAI->getRawAssignID());
6663 const auto *RawAddr = DAI->getRawAddress();
6664 CheckDI(
6665 isa<ValueAsMetadata>(RawAddr) ||
6666 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6667 "invalid llvm.dbg.assign intrinsic address", &DII,
6668 DAI->getRawAddress());
6669 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6670 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6671 DAI->getRawAddressExpression());
6672 // All of the linked instructions should be in the same function as DII.
6673 for (Instruction *I : at::getAssignmentInsts(DAI))
6674 CheckDI(DAI->getFunction() == I->getFunction(),
6675 "inst not in same function as dbg.assign", I, DAI);
6676 }
6677
6678 // Ignore broken !dbg attachments; they're checked elsewhere.
6679 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6680 if (!isa<DILocation>(N))
6681 return;
6682
6683 BasicBlock *BB = DII.getParent();
6684 Function *F = BB ? BB->getParent() : nullptr;
6685
6686 // The scopes for variables and !dbg attachments must agree.
6687 DILocalVariable *Var = DII.getVariable();
6688 DILocation *Loc = DII.getDebugLoc();
6689 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6690 &DII, BB, F);
6691
6692 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6693 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6694 if (!VarSP || !LocSP)
6695 return; // Broken scope chains are checked elsewhere.
6696
6697 CheckDI(VarSP == LocSP,
6698 "mismatched subprogram between llvm.dbg." + Kind +
6699 " variable and !dbg attachment",
6700 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6701 Loc->getScope()->getSubprogram());
6702
6703 // This check is redundant with one in visitLocalVariable().
6704 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6705 Var->getRawType());
6706 verifyFnArgs(DII);
6707}
6708
6709void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6710 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6711 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6712 DLI.getRawLabel());
6713
6714 // Ignore broken !dbg attachments; they're checked elsewhere.
6715 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6716 if (!isa<DILocation>(N))
6717 return;
6718
6719 BasicBlock *BB = DLI.getParent();
6720 Function *F = BB ? BB->getParent() : nullptr;
6721
6722 // The scopes for variables and !dbg attachments must agree.
6723 DILabel *Label = DLI.getLabel();
6724 DILocation *Loc = DLI.getDebugLoc();
6725 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6726 BB, F);
6727
6728 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6729 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6730 if (!LabelSP || !LocSP)
6731 return;
6732
6733 CheckDI(LabelSP == LocSP,
6734 "mismatched subprogram between llvm.dbg." + Kind +
6735 " label and !dbg attachment",
6736 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6737 Loc->getScope()->getSubprogram());
6738}
6739
6740void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6741 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6742 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6743
6744 // We don't know whether this intrinsic verified correctly.
6745 if (!V || !E || !E->isValid())
6746 return;
6747
6748 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6749 auto Fragment = E->getFragmentInfo();
6750 if (!Fragment)
6751 return;
6752
6753 // The frontend helps out GDB by emitting the members of local anonymous
6754 // unions as artificial local variables with shared storage. When SROA splits
6755 // the storage for artificial local variables that are smaller than the entire
6756 // union, the overhang piece will be outside of the allotted space for the
6757 // variable and this check fails.
6758 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6759 if (V->isArtificial())
6760 return;
6761
6762 verifyFragmentExpression(*V, *Fragment, &I);
6763}
6764void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
6765 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
6766 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6767
6768 // We don't know whether this intrinsic verified correctly.
6769 if (!V || !E || !E->isValid())
6770 return;
6771
6772 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6773 auto Fragment = E->getFragmentInfo();
6774 if (!Fragment)
6775 return;
6776
6777 // The frontend helps out GDB by emitting the members of local anonymous
6778 // unions as artificial local variables with shared storage. When SROA splits
6779 // the storage for artificial local variables that are smaller than the entire
6780 // union, the overhang piece will be outside of the allotted space for the
6781 // variable and this check fails.
6782 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6783 if (V->isArtificial())
6784 return;
6785
6786 verifyFragmentExpression(*V, *Fragment, &DVR);
6787}
6788
6789template <typename ValueOrMetadata>
6790void Verifier::verifyFragmentExpression(const DIVariable &V,
6791 DIExpression::FragmentInfo Fragment,
6792 ValueOrMetadata *Desc) {
6793 // If there's no size, the type is broken, but that should be checked
6794 // elsewhere.
6795 auto VarSize = V.getSizeInBits();
6796 if (!VarSize)
6797 return;
6798
6799 unsigned FragSize = Fragment.SizeInBits;
6800 unsigned FragOffset = Fragment.OffsetInBits;
6801 CheckDI(FragSize + FragOffset <= *VarSize,
6802 "fragment is larger than or outside of variable", Desc, &V);
6803 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6804}
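// Worked example (for exposition only): for a 64-bit variable, a fragment with
// OffsetInBits=32 and SizeInBits=32 passes (32 + 32 <= 64 and 32 != 64),
// whereas OffsetInBits=48 with SizeInBits=32 (48 + 32 > 64) or a fragment that
// covers all 64 bits would be flagged by the two CheckDI calls above.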
6805
6806void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6807 // This function does not take the scope of noninlined function arguments into
6808 // account. Don't run it if the current function is nodebug, because it may
6809 // contain inlined debug intrinsics.
6810 if (!HasDebugInfo)
6811 return;
6812
6813 // For performance reasons only check non-inlined ones.
6814 if (I.getDebugLoc()->getInlinedAt())
6815 return;
6816
6817 DILocalVariable *Var = I.getVariable();
6818 CheckDI(Var, "dbg intrinsic without variable");
6819
6820 unsigned ArgNo = Var->getArg();
6821 if (!ArgNo)
6822 return;
6823
6824 // Verify there are no duplicate function argument debug info entries.
6825 // These will cause hard-to-debug assertions in the DWARF backend.
6826 if (DebugFnArgs.size() < ArgNo)
6827 DebugFnArgs.resize(ArgNo, nullptr);
6828
6829 auto *Prev = DebugFnArgs[ArgNo - 1];
6830 DebugFnArgs[ArgNo - 1] = Var;
6831 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6832 Prev, Var);
6833}
6834void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
6835 // This function does not take the scope of noninlined function arguments into
6836 // account. Don't run it if the current function is nodebug, because it may
6837 // contain inlined debug intrinsics.
6838 if (!HasDebugInfo)
6839 return;
6840
6841 // For performance reasons only check non-inlined ones.
6842 if (DVR.getDebugLoc()->getInlinedAt())
6843 return;
6844
6845 DILocalVariable *Var = DVR.getVariable();
6846 CheckDI(Var, "#dbg record without variable");
6847
6848 unsigned ArgNo = Var->getArg();
6849 if (!ArgNo)
6850 return;
6851
6852 // Verify there are no duplicate function argument debug info entries.
6853 // These will cause hard-to-debug assertions in the DWARF backend.
6854 if (DebugFnArgs.size() < ArgNo)
6855 DebugFnArgs.resize(ArgNo, nullptr);
6856
6857 auto *Prev = DebugFnArgs[ArgNo - 1];
6858 DebugFnArgs[ArgNo - 1] = Var;
6859 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
6860 Prev, Var);
6861}
6862
6863void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6864 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6865
6866 // We don't know whether this intrinsic verified correctly.
6867 if (!E || !E->isValid())
6868 return;
6869
6870 if (isa<ValueAsMetadata>(I.getRawLocation())) {
6871 Value *VarValue = I.getVariableLocationOp(0);
6872 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6873 return;
6874 // We allow EntryValues for swift async arguments, as they have an
6875 // ABI-guarantee to be turned into a specific register.
6876 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6877 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6878 return;
6879 }
6880
6881 CheckDI(!E->isEntryValue(),
6882 "Entry values are only allowed in MIR unless they target a "
6883 "swiftasync Argument",
6884 &I);
6885}
6886void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
6887 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6888
6889 // We don't know whether this intrinsic verified correctly.
6890 if (!E || !E->isValid())
6891 return;
6892
6893 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
6894 Value *VarValue = DVR.getVariableLocationOp(0);
6895 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6896 return;
6897 // We allow EntryValues for swift async arguments, as they have an
6898 // ABI-guarantee to be turned into a specific register.
6899 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6900 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6901 return;
6902 }
6903
6904 CheckDI(!E->isEntryValue(),
6905 "Entry values are only allowed in MIR unless they target a "
6906 "swiftasync Argument",
6907 &DVR);
6908}
6909
6910void Verifier::verifyCompileUnits() {
6911 // When more than one Module is imported into the same context, such as during
6912 // an LTO build before linking the modules, ODR type uniquing may cause types
6913 // to point to a different CU. This check does not make sense in this case.
6914 if (M.getContext().isODRUniquingDebugTypes())
6915 return;
6916 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
6917 SmallPtrSet<const Metadata *, 2> Listed;
6918 if (CUs)
6919 Listed.insert(CUs->op_begin(), CUs->op_end());
6920 for (const auto *CU : CUVisited)
6921 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
6922 CUVisited.clear();
6923}
6924
6925void Verifier::verifyDeoptimizeCallingConvs() {
6926 if (DeoptimizeDeclarations.empty())
6927 return;
6928
6929 const Function *First = DeoptimizeDeclarations[0];
6930 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
6931 Check(First->getCallingConv() == F->getCallingConv(),
6932 "All llvm.experimental.deoptimize declarations must have the same "
6933 "calling convention",
6934 First, F);
6935 }
6936}
6937
6938void Verifier::verifyAttachedCallBundle(const CallBase &Call,
6939 const OperandBundleUse &BU) {
6940 FunctionType *FTy = Call.getFunctionType();
6941
6942 Check((FTy->getReturnType()->isPointerTy() ||
6943 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
6944 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
6945 "function returning a pointer or a non-returning function that has a "
6946 "void return type",
6947 Call);
6948
6949 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
6950 "operand bundle \"clang.arc.attachedcall\" requires one function as "
6951 "an argument",
6952 Call);
6953
6954 auto *Fn = cast<Function>(BU.Inputs.front());
6955 Intrinsic::ID IID = Fn->getIntrinsicID();
6956
6957 if (IID) {
6958 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
6959 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
6960 "invalid function argument", Call);
6961 } else {
6962 StringRef FnName = Fn->getName();
6963 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
6964 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
6965 "invalid function argument", Call);
6966 }
6967}
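// Illustrative sketch (assumed IR; @make_object is a placeholder name, for
// exposition only): a bundle that satisfies both checks above -- the callee
// returns a pointer and the single bundle operand is one of the two allowed
// ObjC runtime functions:
//   %obj = call ptr @make_object() [
//       "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]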
6968
6969void Verifier::verifyNoAliasScopeDecl() {
6970 if (NoAliasScopeDecls.empty())
6971 return;
6972
6973 // only a single scope must be declared at a time.
6974 for (auto *II : NoAliasScopeDecls) {
6975 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
6976 "Not a llvm.experimental.noalias.scope.decl ?");
6977 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
6978 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
6979 Check(ScopeListMV != nullptr,
6980 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
6981 "argument",
6982 II);
6983
6984 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
6985 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
6986 Check(ScopeListMD->getNumOperands() == 1,
6987 "!id.scope.list must point to a list with a single scope", II);
6988 visitAliasScopeListMetadata(ScopeListMD);
6989 }
6990
6991 // Only check the domination rule when requested. Once all passes have been
6992 // adapted this option can go away.
6993 if (!VerifyNoAliasScopeDomination)
6994 return;
6995
6996 // Now sort the intrinsics based on the scope MDNode so that declarations of
6997 // the same scopes are next to each other.
6998 auto GetScope = [](IntrinsicInst *II) {
6999 const auto *ScopeListMV = cast<MetadataAsValue>(
7000 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7001 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7002 };
7003
7004 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7005 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7006 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7007 return GetScope(Lhs) < GetScope(Rhs);
7008 };
7009
7010 llvm::sort(NoAliasScopeDecls, Compare);
7011
7012 // Go over the intrinsics and check that for the same scope, they are not
7013 // dominating each other.
7014 auto ItCurrent = NoAliasScopeDecls.begin();
7015 while (ItCurrent != NoAliasScopeDecls.end()) {
7016 auto CurScope = GetScope(*ItCurrent);
7017 auto ItNext = ItCurrent;
7018 do {
7019 ++ItNext;
7020 } while (ItNext != NoAliasScopeDecls.end() &&
7021 GetScope(*ItNext) == CurScope);
7022
7023 // [ItCurrent, ItNext) represents the declarations for the same scope.
7024 // Ensure they are not dominating each other, but only if it is not too
7025 // expensive.
7026 if (ItNext - ItCurrent < 32)
7027 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7028 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7029 if (I != J)
7030 Check(!DT.dominates(I, J),
7031 "llvm.experimental.noalias.scope.decl dominates another one "
7032 "with the same scope",
7033 I);
7034 ItCurrent = ItNext;
7035 }
7036}
7037
7038//===----------------------------------------------------------------------===//
7039// Implement the public interfaces to this file...
7040//===----------------------------------------------------------------------===//
7041
7042 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7043 Function &F = const_cast<Function &>(f);
7044
7045 // Don't use a raw_null_ostream. Printing IR is expensive.
7046 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7047
7048 // Note that this function's return value is inverted from what you would
7049 // expect of a function called "verify".
7050 return !V.verify(F);
7051}
7052
7053 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7054 bool *BrokenDebugInfo) {
7055 // Don't use a raw_null_ostream. Printing IR is expensive.
7056 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7057
7058 bool Broken = false;
7059 for (const Function &F : M)
7060 Broken |= !V.verify(F);
7061
7062 Broken |= !V.verify();
7063 if (BrokenDebugInfo)
7064 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7065 // Note that this function's return value is inverted from what you would
7066 // expect of a function called "verify".
7067 return Broken;
7068}
7069
7070namespace {
7071
7072struct VerifierLegacyPass : public FunctionPass {
7073 static char ID;
7074
7075 std::unique_ptr<Verifier> V;
7076 bool FatalErrors = true;
7077
7078 VerifierLegacyPass() : FunctionPass(ID) {
7079 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7080 }
7081 explicit VerifierLegacyPass(bool FatalErrors)
7082 : FunctionPass(ID),
7083 FatalErrors(FatalErrors) {
7084 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7085 }
7086
7087 bool doInitialization(Module &M) override {
7088 V = std::make_unique<Verifier>(
7089 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7090 return false;
7091 }
7092
7093 bool runOnFunction(Function &F) override {
7094 if (!V->verify(F) && FatalErrors) {
7095 errs() << "in function " << F.getName() << '\n';
7096 report_fatal_error("Broken function found, compilation aborted!");
7097 }
7098 return false;
7099 }
7100
7101 bool doFinalization(Module &M) override {
7102 bool HasErrors = false;
7103 for (Function &F : M)
7104 if (F.isDeclaration())
7105 HasErrors |= !V->verify(F);
7106
7107 HasErrors |= !V->verify();
7108 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7109 report_fatal_error("Broken module found, compilation aborted!");
7110 return false;
7111 }
7112
7113 void getAnalysisUsage(AnalysisUsage &AU) const override {
7114 AU.setPreservesAll();
7115 }
7116};
7117
7118} // end anonymous namespace
7119
7120/// Helper to issue failure from the TBAA verification
7121template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7122 if (Diagnostic)
7123 return Diagnostic->CheckFailed(Args...);
7124}
7125
7126#define CheckTBAA(C, ...) \
7127 do { \
7128 if (!(C)) { \
7129 CheckFailed(__VA_ARGS__); \
7130 return false; \
7131 } \
7132 } while (false)
7133
7134/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7135/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7136/// struct-type node describing an aggregate data structure (like a struct).
7137TBAAVerifier::TBAABaseNodeSummary
7138TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7139 bool IsNewFormat) {
7140 if (BaseNode->getNumOperands() < 2) {
7141 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7142 return {true, ~0u};
7143 }
7144
7145 auto Itr = TBAABaseNodes.find(BaseNode);
7146 if (Itr != TBAABaseNodes.end())
7147 return Itr->second;
7148
7149 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7150 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7151 (void)InsertResult;
7152 assert(InsertResult.second && "We just checked!");
7153 return Result;
7154}
7155
7156TBAAVerifier::TBAABaseNodeSummary
7157TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7158 bool IsNewFormat) {
7159 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7160
7161 if (BaseNode->getNumOperands() == 2) {
7162 // Scalar nodes can only be accessed at offset 0.
7163 return isValidScalarTBAANode(BaseNode)
7164 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7165 : InvalidNode;
7166 }
7167
7168 if (IsNewFormat) {
7169 if (BaseNode->getNumOperands() % 3 != 0) {
7170 CheckFailed("Access tag nodes must have the number of operands that is a "
7171 "multiple of 3!", BaseNode);
7172 return InvalidNode;
7173 }
7174 } else {
7175 if (BaseNode->getNumOperands() % 2 != 1) {
7176 CheckFailed("Struct tag nodes must have an odd number of operands!",
7177 BaseNode);
7178 return InvalidNode;
7179 }
7180 }
7181
7182 // Check the type size field.
7183 if (IsNewFormat) {
7184 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7185 BaseNode->getOperand(1));
7186 if (!TypeSizeNode) {
7187 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7188 return InvalidNode;
7189 }
7190 }
7191
7192 // Check the type name field. In the new format it can be anything.
7193 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7194 CheckFailed("Struct tag nodes have a string as their first operand",
7195 BaseNode);
7196 return InvalidNode;
7197 }
7198
7199 bool Failed = false;
7200
7201 std::optional<APInt> PrevOffset;
7202 unsigned BitWidth = ~0u;
7203
7204 // We've already checked that BaseNode is not a degenerate root node with one
7205 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7206 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7207 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7208 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7209 Idx += NumOpsPerField) {
7210 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7211 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7212 if (!isa<MDNode>(FieldTy)) {
7213 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7214 Failed = true;
7215 continue;
7216 }
7217
7218 auto *OffsetEntryCI =
7219 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7220 if (!OffsetEntryCI) {
7221 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7222 Failed = true;
7223 continue;
7224 }
7225
7226 if (BitWidth == ~0u)
7227 BitWidth = OffsetEntryCI->getBitWidth();
7228
7229 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7230 CheckFailed(
7231 "Bitwidth between the offsets and struct type entries must match", &I,
7232 BaseNode);
7233 Failed = true;
7234 continue;
7235 }
7236
7237 // NB! As far as I can tell, we generate a non-strictly increasing offset
7238 // sequence only from structs that have zero size bit fields. When
7239 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7240 // pick the field lexically the latest in struct type metadata node. This
7241 // mirrors the actual behavior of the alias analysis implementation.
7242 bool IsAscending =
7243 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7244
7245 if (!IsAscending) {
7246 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7247 Failed = true;
7248 }
7249
7250 PrevOffset = OffsetEntryCI->getValue();
7251
7252 if (IsNewFormat) {
7253 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7254 BaseNode->getOperand(Idx + 2));
7255 if (!MemberSizeNode) {
7256 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7257 Failed = true;
7258 continue;
7259 }
7260 }
7261 }
7262
7263 return Failed ? InvalidNode
7264 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7265}
7266
7267static bool IsRootTBAANode(const MDNode *MD) {
7268 return MD->getNumOperands() < 2;
7269}
7270
7271static bool IsScalarTBAANodeImpl(const MDNode *MD,
7272 SmallPtrSetImpl<const MDNode *> &Visited) {
7273 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7274 return false;
7275
7276 if (!isa<MDString>(MD->getOperand(0)))
7277 return false;
7278
7279 if (MD->getNumOperands() == 3) {
7280 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7281 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7282 return false;
7283 }
7284
7285 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7286 return Parent && Visited.insert(Parent).second &&
7287 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7288}
7289
7290bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7291 auto ResultIt = TBAAScalarNodes.find(MD);
7292 if (ResultIt != TBAAScalarNodes.end())
7293 return ResultIt->second;
7294
7295 SmallPtrSet<const MDNode *, 4> Visited;
7296 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7297 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7298 (void)InsertResult;
7299 assert(InsertResult.second && "Just checked!");
7300
7301 return Result;
7302}
7303
7304/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7305/// Offset in place to be the offset within the field node returned.
7306///
7307/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7308MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7309 const MDNode *BaseNode,
7310 APInt &Offset,
7311 bool IsNewFormat) {
7312 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7313
7314 // Scalar nodes have only one possible "field" -- their parent in the access
7315 // hierarchy. Offset must be zero at this point, but our caller is supposed
7316 // to check that.
7317 if (BaseNode->getNumOperands() == 2)
7318 return cast<MDNode>(BaseNode->getOperand(1));
7319
7320 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7321 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7322 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7323 Idx += NumOpsPerField) {
7324 auto *OffsetEntryCI =
7325 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7326 if (OffsetEntryCI->getValue().ugt(Offset)) {
7327 if (Idx == FirstFieldOpNo) {
7328 CheckFailed("Could not find TBAA parent in struct type node", &I,
7329 BaseNode, &Offset);
7330 return nullptr;
7331 }
7332
7333 unsigned PrevIdx = Idx - NumOpsPerField;
7334 auto *PrevOffsetEntryCI =
7335 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7336 Offset -= PrevOffsetEntryCI->getValue();
7337 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7338 }
7339 }
7340
7341 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7342 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7343 BaseNode->getOperand(LastIdx + 1));
7344 Offset -= LastOffsetEntryCI->getValue();
7345 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7346}
7347
7348 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7349 if (!Type || Type->getNumOperands() < 3)
7350 return false;
7351
7352 // In the new format type nodes shall have a reference to the parent type as
7353 // its first operand.
7354 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7355}
7356
7358 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7359 &I, MD);
7360
7361 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7362 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7363 isa<AtomicCmpXchgInst>(I),
7364 "This instruction shall not have a TBAA access tag!", &I);
7365
7366 bool IsStructPathTBAA =
7367 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7368
7369 CheckTBAA(IsStructPathTBAA,
7370 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7371 &I);
7372
7373 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7374 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7375
7376 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7377
7378 if (IsNewFormat) {
7379 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7380 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7381 } else {
7382 CheckTBAA(MD->getNumOperands() < 5,
7383 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7384 }
7385
7386 // Check the access size field.
7387 if (IsNewFormat) {
7388 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7389 MD->getOperand(3));
7390 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7391 }
7392
7393 // Check the immutability flag.
7394 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7395 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7396 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7397 MD->getOperand(ImmutabilityFlagOpNo));
7398 CheckTBAA(IsImmutableCI,
7399 "Immutability tag on struct tag metadata must be a constant", &I,
7400 MD);
7401 CheckTBAA(
7402 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7403 "Immutability part of the struct tag metadata must be either 0 or 1",
7404 &I, MD);
7405 }
7406
7407 CheckTBAA(BaseNode && AccessType,
7408 "Malformed struct tag metadata: base and access-type "
7409 "should be non-null and point to Metadata nodes",
7410 &I, MD, BaseNode, AccessType);
7411
7412 if (!IsNewFormat) {
7413 CheckTBAA(isValidScalarTBAANode(AccessType),
7414 "Access type node must be a valid scalar type", &I, MD,
7415 AccessType);
7416 }
7417
7418 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7419 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7420
7421 APInt Offset = OffsetCI->getValue();
7422 bool SeenAccessTypeInPath = false;
7423
7424 SmallPtrSet<MDNode *, 4> StructPath;
7425
7426 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7427 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7428 IsNewFormat)) {
7429 if (!StructPath.insert(BaseNode).second) {
7430 CheckFailed("Cycle detected in struct path", &I, MD);
7431 return false;
7432 }
7433
7434 bool Invalid;
7435 unsigned BaseNodeBitWidth;
7436 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7437 IsNewFormat);
7438
7439 // If the base node is invalid in itself, then we've already printed all the
7440 // errors we wanted to print.
7441 if (Invalid)
7442 return false;
7443
7444 SeenAccessTypeInPath |= BaseNode == AccessType;
7445
7446 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7447 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7448 &I, MD, &Offset);
7449
7450 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7451 (BaseNodeBitWidth == 0 && Offset == 0) ||
7452 (IsNewFormat && BaseNodeBitWidth == ~0u),
7453 "Access bit-width not the same as description bit-width", &I, MD,
7454 BaseNodeBitWidth, Offset.getBitWidth());
7455
7456 if (IsNewFormat && SeenAccessTypeInPath)
7457 break;
7458 }
7459
7460 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7461 MD);
7462 return true;
7463}
7464
7465 bool TBAAVerifier::visitTBAAStructMetadata(Instruction &I, const MDNode *MD) {
7466 CheckTBAA(MD->getNumOperands() % 3 == 0,
7467 "tbaa.struct operands must occur in groups of three", &I, MD);
7468
7469 // Each group of three operands must consist of two integers and a
7470 // tbaa node. Moreover, the regions described by the offset and size
7471 // operands must be non-overlapping.
7472 std::optional<APInt> NextFree;
7473 for (unsigned int Idx = 0; Idx < MD->getNumOperands(); Idx += 3) {
7474 auto *OffsetCI =
7475 mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(Idx));
7476 CheckTBAA(OffsetCI, "Offset must be a constant integer", &I, MD);
7477
7478 auto *SizeCI =
7479 mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(Idx + 1));
7480 CheckTBAA(SizeCI, "Size must be a constant integer", &I, MD);
7481
7482 MDNode *TBAA = dyn_cast_or_null<MDNode>(MD->getOperand(Idx + 2));
7483 CheckTBAA(TBAA, "TBAA tag missing", &I, MD);
7484 visitTBAAMetadata(I, TBAA);
7485
7486 bool NonOverlapping = !NextFree || NextFree->ule(OffsetCI->getValue());
7487 CheckTBAA(NonOverlapping, "Overlapping tbaa.struct regions", &I, MD);
7488
7489 NextFree = OffsetCI->getValue() + SizeCI->getValue();
7490 }
7491 return true;
7492}
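// Illustrative sketch (assumed metadata, for exposition only): a tbaa.struct
// node with two non-overlapping regions -- (offset 0, size 4) followed by
// (offset 8, size 4) -- each carrying an access tag (!1 and !2 are assumed to
// be valid struct-path access tags):
//   !5 = !{i64 0, i64 4, !1, i64 8, i64 4, !2}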
7493
7494char VerifierLegacyPass::ID = 0;
7495INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7496
7497 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7498 return new VerifierLegacyPass(FatalErrors);
7499}
7500
7501AnalysisKey VerifierAnalysis::Key;
7502 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7503 ModuleAnalysisManager &) {
7504 Result Res;
7505 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7506 return Res;
7507}
7508
7509 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7510 FunctionAnalysisManager &) {
7511 return { llvm::verifyFunction(F, &dbgs()), false };
7512}
7513
7514 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7515 auto Res = AM.getResult<VerifierAnalysis>(M);
7516 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7517 report_fatal_error("Broken module found, compilation aborted!");
7518
7519 return PreservedAnalyses::all();
7520}
7521
7522PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7523 auto res = AM.getResult<VerifierAnalysis>(F);
7524 if (res.IRBroken && FatalErrors)
7525 report_fatal_error("Broken function found, compilation aborted!");
7526
7527 return PreservedAnalyses::all();
7528}
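Editorial note, not part of the listing: the entry points above are the ones client code normally reaches for. A minimal usage sketch against the declarations in llvm/IR/Verifier.h; the helper names checkIR and addVerifier are illustrative.

#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Standalone checks: both return true when the IR is broken, printing
// diagnostics to the given stream.
static bool checkIR(Module &M, Function &F) {
  bool BrokenDebugInfo = false;
  bool BadModule = verifyModule(M, &errs(), &BrokenDebugInfo);
  bool BadFunction = verifyFunction(F, &errs());
  return !BadModule && !BadFunction && !BrokenDebugInfo;
}

// In a new pass manager pipeline, VerifierPass(FatalErrors) mirrors the
// run() overloads above and reports a fatal error when asked to.
static void addVerifier(ModulePassManager &MPM) {
  MPM.addPass(VerifierPass(/*FatalErrors=*/true));
}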