Verifier.cpp
1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the function verifier interface, which can be used for some
11 // sanity checking of input to the system.
12 //
13 // Note that this does not provide full `Java style' security and verification;
14 // instead it just tries to ensure that code is well-formed.
15 //
16 // * Both of a binary operator's parameters are of the same type
17 // * Verify that the indices of mem access instructions match other operands
18 // * Verify that arithmetic and other things are only performed on first-class
19 // types. Verify that shifts & logicals only happen on integrals f.e.
20 // * All of the constants in a switch statement are of the correct type
21 // * The code is in valid SSA form
22 // * It should be illegal to put a label into any other type (like a structure)
23 // or to return one. [except constant arrays!]
24 // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
25 // * PHI nodes must have an entry for each predecessor, with no extras.
26 // * PHI nodes must be the first thing in a basic block, all grouped together
27 // * PHI nodes must have at least one entry
28 // * All basic blocks should only end with terminator insts, not contain them
29 // * The entry node to a function must not have predecessors
30 // * All Instructions must be embedded into a basic block
31 // * Functions cannot take a void-typed parameter
32 // * Verify that a function's argument list agrees with its declared type.
33 // * It is illegal to specify a name for a void value.
34 // * It is illegal to have an internal global value with no initializer
35 // * It is illegal to have a ret instruction that returns a value that does not
36 // agree with the function return value type.
37 // * Function call argument types match the function prototype
38 // * A landing pad is defined by a landingpad instruction, and can be jumped to
39 // only by the unwind edge of an invoke instruction.
40 // * A landingpad instruction must be the first non-PHI instruction in the
41 // block.
42 // * Landingpad instructions must be in a function with a personality function.
43 // * All other things that are tested by asserts spread about the code...
44 //
45 //===----------------------------------------------------------------------===//
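//
// Client code normally does not instantiate the Verifier class defined below
// directly; it goes through the entry points declared in llvm/IR/Verifier.h.
// A minimal sketch of such a driver, assuming the LLVM 6.0 C++ API:
//
//   #include "llvm/IR/Module.h"
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   bool moduleIsWellFormed(const llvm::Module &M) {
//     // verifyModule() returns true when the module is broken; passing a
//     // stream makes CheckFailed() below print its diagnostics there.
//     return !llvm::verifyModule(M, &llvm::errs());
//   }
//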
46 
47 #include "llvm/IR/Verifier.h"
48 #include "llvm/ADT/APFloat.h"
49 #include "llvm/ADT/APInt.h"
50 #include "llvm/ADT/ArrayRef.h"
51 #include "llvm/ADT/DenseMap.h"
52 #include "llvm/ADT/MapVector.h"
53 #include "llvm/ADT/Optional.h"
54 #include "llvm/ADT/STLExtras.h"
55 #include "llvm/ADT/SmallPtrSet.h"
56 #include "llvm/ADT/SmallSet.h"
57 #include "llvm/ADT/SmallVector.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/ADT/ilist.h"
62 #include "llvm/BinaryFormat/Dwarf.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/CallSite.h"
68 #include "llvm/IR/CallingConv.h"
69 #include "llvm/IR/Comdat.h"
70 #include "llvm/IR/Constant.h"
71 #include "llvm/IR/ConstantRange.h"
72 #include "llvm/IR/Constants.h"
73 #include "llvm/IR/DataLayout.h"
74 #include "llvm/IR/DebugInfo.h"
75 #include "llvm/IR/DebugInfoMetadata.h"
76 #include "llvm/IR/DebugLoc.h"
77 #include "llvm/IR/DerivedTypes.h"
78 #include "llvm/IR/DiagnosticInfo.h"
79 #include "llvm/IR/Dominators.h"
80 #include "llvm/IR/Function.h"
81 #include "llvm/IR/GlobalAlias.h"
82 #include "llvm/IR/GlobalValue.h"
83 #include "llvm/IR/GlobalVariable.h"
84 #include "llvm/IR/InlineAsm.h"
85 #include "llvm/IR/InstVisitor.h"
86 #include "llvm/IR/InstrTypes.h"
87 #include "llvm/IR/Instruction.h"
88 #include "llvm/IR/Instructions.h"
89 #include "llvm/IR/IntrinsicInst.h"
90 #include "llvm/IR/Intrinsics.h"
91 #include "llvm/IR/LLVMContext.h"
92 #include "llvm/IR/Metadata.h"
93 #include "llvm/IR/Module.h"
94 #include "llvm/IR/ModuleSlotTracker.h"
95 #include "llvm/IR/PassManager.h"
96 #include "llvm/IR/Statepoint.h"
97 #include "llvm/IR/Type.h"
98 #include "llvm/IR/Use.h"
99 #include "llvm/IR/User.h"
100 #include "llvm/IR/Value.h"
101 #include "llvm/Pass.h"
102 #include "llvm/Support/AtomicOrdering.h"
103 #include "llvm/Support/Casting.h"
104 #include "llvm/Support/CommandLine.h"
105 #include "llvm/Support/Debug.h"
106 #include "llvm/Support/ErrorHandling.h"
107 #include "llvm/Support/MathExtras.h"
108 #include "llvm/Support/raw_ostream.h"
109 #include <algorithm>
110 #include <cassert>
111 #include <cstdint>
112 #include <memory>
113 #include <string>
114 #include <utility>
115 
116 using namespace llvm;
117 
118 static cl::opt<bool> VerifyDebugInfo("verify-debug-info", cl::init(true));
119 
120 namespace llvm {
121 
122 struct VerifierSupport {
123  raw_ostream *OS;
124  const Module &M;
125  ModuleSlotTracker MST;
126  const DataLayout &DL;
127  LLVMContext &Context;
128 
129  /// Track the brokenness of the module while recursively visiting.
130  bool Broken = false;
131  /// Broken debug info can be "recovered" from by stripping the debug info.
132  bool BrokenDebugInfo = false;
133  /// Whether to treat broken debug info as an error.
134  bool TreatBrokenDebugInfoAsError = true;
135 
136  explicit VerifierSupport(raw_ostream *OS, const Module &M)
137  : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}
138 
139 private:
140  void Write(const Module *M) {
141  *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
142  }
143 
144  void Write(const Value *V) {
145  if (!V)
146  return;
147  if (isa<Instruction>(V)) {
148  V->print(*OS, MST);
149  *OS << '\n';
150  } else {
151  V->printAsOperand(*OS, true, MST);
152  *OS << '\n';
153  }
154  }
155 
156  void Write(ImmutableCallSite CS) {
157  Write(CS.getInstruction());
158  }
159 
160  void Write(const Metadata *MD) {
161  if (!MD)
162  return;
163  MD->print(*OS, MST, &M);
164  *OS << '\n';
165  }
166 
167  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
168  Write(MD.get());
169  }
170 
171  void Write(const NamedMDNode *NMD) {
172  if (!NMD)
173  return;
174  NMD->print(*OS, MST);
175  *OS << '\n';
176  }
177 
178  void Write(Type *T) {
179  if (!T)
180  return;
181  *OS << ' ' << *T;
182  }
183 
184  void Write(const Comdat *C) {
185  if (!C)
186  return;
187  *OS << *C;
188  }
189 
190  void Write(const APInt *AI) {
191  if (!AI)
192  return;
193  *OS << *AI << '\n';
194  }
195 
196  void Write(const unsigned i) { *OS << i << '\n'; }
197 
198  template <typename T> void Write(ArrayRef<T> Vs) {
199  for (const T &V : Vs)
200  Write(V);
201  }
202 
203  template <typename T1, typename... Ts>
204  void WriteTs(const T1 &V1, const Ts &... Vs) {
205  Write(V1);
206  WriteTs(Vs...);
207  }
208 
209  template <typename... Ts> void WriteTs() {}
210 
211 public:
212  /// \brief A check failed, so print out the condition and the message.
213  ///
214  /// This provides a nice place to put a breakpoint if you want to see why
215  /// something is not correct.
216  void CheckFailed(const Twine &Message) {
217  if (OS)
218  *OS << Message << '\n';
219  Broken = true;
220  }
221 
222  /// \brief A check failed (with values to print).
223  ///
224  /// This calls the Message-only version so that the above is easier to set a
225  /// breakpoint on.
226  template <typename T1, typename... Ts>
227  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
228  CheckFailed(Message);
229  if (OS)
230  WriteTs(V1, Vs...);
231  }
232 
233  /// A debug info check failed.
234  void DebugInfoCheckFailed(const Twine &Message) {
235  if (OS)
236  *OS << Message << '\n';
237  Broken |= TreatBrokenDebugInfoAsError;
238  BrokenDebugInfo = true;
239  }
240 
241  /// A debug info check failed (with values to print).
242  template <typename T1, typename... Ts>
243  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
244  const Ts &... Vs) {
245  DebugInfoCheckFailed(Message);
246  if (OS)
247  WriteTs(V1, Vs...);
248  }
249 };
250 
251 } // namespace llvm
252 
253 namespace {
254 
255 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
256  friend class InstVisitor<Verifier>;
257 
258  DominatorTree DT;
259 
260  /// \brief When verifying a basic block, keep track of all of the
261  /// instructions we have seen so far.
262  ///
263  /// This allows us to do efficient dominance checks for the case when an
264  /// instruction has an operand that is an instruction in the same block.
265  SmallPtrSet<Instruction *, 16> InstsInThisBlock;
266 
267  /// \brief Keep track of the metadata nodes that have been checked already.
268  SmallPtrSet<const Metadata *, 32> MDNodes;
269 
270  /// Keep track which DISubprogram is attached to which function.
271  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
272 
273  /// Track all DICompileUnits visited.
274  SmallPtrSet<const Metadata *, 2> CUVisited;
275 
276  /// \brief The result type for a landingpad.
277  Type *LandingPadResultTy;
278 
279  /// \brief Whether we've seen a call to @llvm.localescape in this function
280  /// already.
281  bool SawFrameEscape;
282 
283  /// Whether the current function has a DISubprogram attached to it.
284  bool HasDebugInfo = false;
285 
286  /// Stores the count of how many objects were passed to llvm.localescape for a
287  /// given function and the largest index passed to llvm.localrecover.
288  DenseMap<Function *, std::pair<unsigned, SmallVector<unsigned, 4>>> FrameEscapeInfo;
289 
290  // Maps catchswitches and cleanuppads that unwind to siblings to the
291  // terminators that indicate the unwind, used to detect cycles therein.
292  MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;
293 
294  /// Cache of constants visited in search of ConstantExprs.
295  SmallPtrSet<const Constant *, 32> ConstantExprVisited;
296 
297  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
298  SmallVector<const Function *, 4> DeoptimizeDeclarations;
299 
300  // Verify that this GlobalValue is only used in this module.
301  // This map is used to avoid visiting uses twice. We can arrive at a user
302  // twice, if they have multiple operands. In particular for very large
303  // constant expressions, we can arrive at a particular user many times.
304  SmallPtrSet<const Value *, 32> GlobalValueVisited;
305 
306  // Keeps track of duplicate function argument debug info.
307  SmallVector<const DILocalVariable *, 16> DebugFnArgs;
308 
309  TBAAVerifier TBAAVerifyHelper;
310 
311  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
312 
313 public:
314  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
315  const Module &M)
316  : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
317  SawFrameEscape(false), TBAAVerifyHelper(this) {
318  TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
319  }
320 
321  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
322 
323  bool verify(const Function &F) {
324  assert(F.getParent() == &M &&
325  "An instance of this class only works with a specific module!");
326 
327  // First ensure the function is well-enough formed to compute dominance
328  // information, and directly compute a dominance tree. We don't rely on the
329  // pass manager to provide this as it isolates us from a potentially
330  // out-of-date dominator tree and makes it significantly more complex to run
331  // this code outside of a pass manager.
332  // FIXME: It's really gross that we have to cast away constness here.
333  if (!F.empty())
334  DT.recalculate(const_cast<Function &>(F));
335 
336  for (const BasicBlock &BB : F) {
337  if (!BB.empty() && BB.back().isTerminator())
338  continue;
339 
340  if (OS) {
341  *OS << "Basic Block in function '" << F.getName()
342  << "' does not have terminator!\n";
343  BB.printAsOperand(*OS, true, MST);
344  *OS << "\n";
345  }
346  return false;
347  }
348 
349  Broken = false;
350  // FIXME: We strip const here because the inst visitor strips const.
351  visit(const_cast<Function &>(F));
352  verifySiblingFuncletUnwinds();
353  InstsInThisBlock.clear();
354  DebugFnArgs.clear();
355  LandingPadResultTy = nullptr;
356  SawFrameEscape = false;
357  SiblingFuncletInfo.clear();
358 
359  return !Broken;
360  }
361 
362  /// Verify the module that this instance of \c Verifier was initialized with.
363  bool verify() {
364  Broken = false;
365 
366  // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
367  for (const Function &F : M)
368  if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
369  DeoptimizeDeclarations.push_back(&F);
370 
371  // Now that we've visited every function, verify that we never asked to
372  // recover a frame index that wasn't escaped.
373  verifyFrameRecoverIndices();
374  for (const GlobalVariable &GV : M.globals())
375  visitGlobalVariable(GV);
376 
377  for (const GlobalAlias &GA : M.aliases())
378  visitGlobalAlias(GA);
379 
380  for (const NamedMDNode &NMD : M.named_metadata())
381  visitNamedMDNode(NMD);
382 
383  for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
384  visitComdat(SMEC.getValue());
385 
386  visitModuleFlags(M);
387  visitModuleIdents(M);
388 
389  verifyCompileUnits();
390 
391  verifyDeoptimizeCallingConvs();
392  DISubprogramAttachments.clear();
393  return !Broken;
394  }
395 
396 private:
397  // Verification methods...
398  void visitGlobalValue(const GlobalValue &GV);
399  void visitGlobalVariable(const GlobalVariable &GV);
400  void visitGlobalAlias(const GlobalAlias &GA);
401  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
402  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
403  const GlobalAlias &A, const Constant &C);
404  void visitNamedMDNode(const NamedMDNode &NMD);
405  void visitMDNode(const MDNode &MD);
406  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
407  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
408  void visitComdat(const Comdat &C);
409  void visitModuleIdents(const Module &M);
410  void visitModuleFlags(const Module &M);
411  void visitModuleFlag(const MDNode *Op,
412  DenseMap<const MDString *, const MDNode *> &SeenIDs,
413  SmallVectorImpl<const MDNode *> &Requirements);
414  void visitFunction(const Function &F);
415  void visitBasicBlock(BasicBlock &BB);
416  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
417  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
418 
419  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
420 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
421 #include "llvm/IR/Metadata.def"
422  void visitDIScope(const DIScope &N);
423  void visitDIVariable(const DIVariable &N);
424  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
425  void visitDITemplateParameter(const DITemplateParameter &N);
426 
427  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
428 
429  // InstVisitor overrides...
430  using InstVisitor<Verifier>::visit;
431  void visit(Instruction &I);
432 
433  void visitTruncInst(TruncInst &I);
434  void visitZExtInst(ZExtInst &I);
435  void visitSExtInst(SExtInst &I);
436  void visitFPTruncInst(FPTruncInst &I);
437  void visitFPExtInst(FPExtInst &I);
438  void visitFPToUIInst(FPToUIInst &I);
439  void visitFPToSIInst(FPToSIInst &I);
440  void visitUIToFPInst(UIToFPInst &I);
441  void visitSIToFPInst(SIToFPInst &I);
442  void visitIntToPtrInst(IntToPtrInst &I);
443  void visitPtrToIntInst(PtrToIntInst &I);
444  void visitBitCastInst(BitCastInst &I);
445  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
446  void visitPHINode(PHINode &PN);
447  void visitBinaryOperator(BinaryOperator &B);
448  void visitICmpInst(ICmpInst &IC);
449  void visitFCmpInst(FCmpInst &FC);
450  void visitExtractElementInst(ExtractElementInst &EI);
451  void visitInsertElementInst(InsertElementInst &EI);
452  void visitShuffleVectorInst(ShuffleVectorInst &EI);
453  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
454  void visitCallInst(CallInst &CI);
455  void visitInvokeInst(InvokeInst &II);
456  void visitGetElementPtrInst(GetElementPtrInst &GEP);
457  void visitLoadInst(LoadInst &LI);
458  void visitStoreInst(StoreInst &SI);
459  void verifyDominatesUse(Instruction &I, unsigned i);
460  void visitInstruction(Instruction &I);
461  void visitTerminatorInst(TerminatorInst &I);
462  void visitBranchInst(BranchInst &BI);
463  void visitReturnInst(ReturnInst &RI);
464  void visitSwitchInst(SwitchInst &SI);
465  void visitIndirectBrInst(IndirectBrInst &BI);
466  void visitSelectInst(SelectInst &SI);
467  void visitUserOp1(Instruction &I);
468  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
469  void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
470  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
471  void visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII);
472  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
473  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
474  void visitFenceInst(FenceInst &FI);
475  void visitAllocaInst(AllocaInst &AI);
476  void visitExtractValueInst(ExtractValueInst &EVI);
477  void visitInsertValueInst(InsertValueInst &IVI);
478  void visitEHPadPredecessors(Instruction &I);
479  void visitLandingPadInst(LandingPadInst &LPI);
480  void visitResumeInst(ResumeInst &RI);
481  void visitCatchPadInst(CatchPadInst &CPI);
482  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
483  void visitCleanupPadInst(CleanupPadInst &CPI);
484  void visitFuncletPadInst(FuncletPadInst &FPI);
485  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
486  void visitCleanupReturnInst(CleanupReturnInst &CRI);
487 
488  void verifyCallSite(CallSite CS);
489  void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
490  void verifySwiftErrorValue(const Value *SwiftErrorVal);
491  void verifyMustTailCall(CallInst &CI);
492  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
493  unsigned ArgNo, std::string &Suffix);
494  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
495  void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
496  const Value *V);
497  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
498  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
499  const Value *V);
500  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
501 
502  void visitConstantExprsRecursively(const Constant *EntryC);
503  void visitConstantExpr(const ConstantExpr *CE);
504  void verifyStatepoint(ImmutableCallSite CS);
505  void verifyFrameRecoverIndices();
506  void verifySiblingFuncletUnwinds();
507 
508  void verifyFragmentExpression(const DbgInfoIntrinsic &I);
509  template <typename ValueOrMetadata>
510  void verifyFragmentExpression(const DIVariable &V,
511  DIExpression::FragmentInfo Fragment,
512  ValueOrMetadata *Desc);
513  void verifyFnArgs(const DbgInfoIntrinsic &I);
514 
515  /// Module-level debug info verification...
516  void verifyCompileUnits();
517 
518  /// Module-level verification that all @llvm.experimental.deoptimize
519  /// declarations share the same calling convention.
520  void verifyDeoptimizeCallingConvs();
521 };
522 
523 } // end anonymous namespace
524 
525 /// We know that cond should be true, if not print an error message.
526 #define Assert(C, ...) \
527  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
528 
529 /// We know that a debug info condition should be true, if not print
530 /// an error message.
531 #define AssertDI(C, ...) \
532  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
533 
534 void Verifier::visit(Instruction &I) {
535  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
536  Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
537  InstVisitor<Verifier>::visit(I);
538 }
539 
540 // Helper to recursively iterate over indirect users. By
541 // returning false, the callback can ask to stop recursing
542 // further.
543 static void forEachUser(const Value *User,
544  SmallPtrSet<const Value *, 32> &Visited,
545  llvm::function_ref<bool(const Value *)> Callback) {
546  if (!Visited.insert(User).second)
547  return;
548  for (const Value *TheNextUser : User->materialized_users())
549  if (Callback(TheNextUser))
550  forEachUser(TheNextUser, Visited, Callback);
551 }
552 
553 void Verifier::visitGlobalValue(const GlobalValue &GV) {
554  Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
555  "Global is external, but doesn't have external or weak linkage!", &GV);
556 
557  Assert(GV.getAlignment() <= Value::MaximumAlignment,
558  "huge alignment values are unsupported", &GV);
559  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
560  "Only global variables can have appending linkage!", &GV);
561 
562  if (GV.hasAppendingLinkage()) {
563  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
564  Assert(GVar && GVar->getValueType()->isArrayTy(),
565  "Only global arrays can have appending linkage!", GVar);
566  }
567 
568  if (GV.isDeclarationForLinker())
569  Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
570 
571  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
572  if (const Instruction *I = dyn_cast<Instruction>(V)) {
573  if (!I->getParent() || !I->getParent()->getParent())
574  CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
575  I);
576  else if (I->getParent()->getParent()->getParent() != &M)
577  CheckFailed("Global is referenced in a different module!", &GV, &M, I,
578  I->getParent()->getParent(),
579  I->getParent()->getParent()->getParent());
580  return false;
581  } else if (const Function *F = dyn_cast<Function>(V)) {
582  if (F->getParent() != &M)
583  CheckFailed("Global is used by function in a different module", &GV, &M,
584  F, F->getParent());
585  return false;
586  }
587  return true;
588  });
589 }
590 
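// The llvm.global_ctors / llvm.global_dtors checks below expect an appending
// array of { priority, function, associated data } entries. Illustrative IR
// for the 3-field form (a sketch, assuming typed-pointer syntax):
//
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//       [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]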
591 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
592  if (GV.hasInitializer()) {
593  Assert(GV.getInitializer()->getType() == GV.getValueType(),
594  "Global variable initializer type does not match global "
595  "variable type!",
596  &GV);
597  // If the global has common linkage, it must have a zero initializer and
598  // cannot be constant.
599  if (GV.hasCommonLinkage()) {
600  Assert(GV.getInitializer()->isNullValue(),
601  "'common' global must have a zero initializer!", &GV);
602  Assert(!GV.isConstant(), "'common' global may not be marked constant!",
603  &GV);
604  Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
605  }
606  }
607 
608  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
609  GV.getName() == "llvm.global_dtors")) {
610  Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
611  "invalid linkage for intrinsic global variable", &GV);
612  // Don't worry about emitting an error for it not being an array,
613  // visitGlobalValue will complain on appending non-array.
614  if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
615  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
616  PointerType *FuncPtrTy =
617  FunctionType::get(Type::getVoidTy(Context), false)->getPointerTo();
618  // FIXME: Reject the 2-field form in LLVM 4.0.
619  Assert(STy &&
620  (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
621  STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
622  STy->getTypeAtIndex(1) == FuncPtrTy,
623  "wrong type for intrinsic global variable", &GV);
624  if (STy->getNumElements() == 3) {
625  Type *ETy = STy->getTypeAtIndex(2);
626  Assert(ETy->isPointerTy() &&
627  cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
628  "wrong type for intrinsic global variable", &GV);
629  }
630  }
631  }
632 
633  if (GV.hasName() && (GV.getName() == "llvm.used" ||
634  GV.getName() == "llvm.compiler.used")) {
635  Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
636  "invalid linkage for intrinsic global variable", &GV);
637  Type *GVType = GV.getValueType();
638  if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
639  PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
640  Assert(PTy, "wrong type for intrinsic global variable", &GV);
641  if (GV.hasInitializer()) {
642  const Constant *Init = GV.getInitializer();
643  const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
644  Assert(InitArray, "wrong initializer for intrinsic global variable",
645  Init);
646  for (Value *Op : InitArray->operands()) {
647  Value *V = Op->stripPointerCastsNoFollowAliases();
648  Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
649  isa<GlobalAlias>(V),
650  "invalid llvm.used member", V);
651  Assert(V->hasName(), "members of llvm.used must be named", V);
652  }
653  }
654  }
655  }
656 
657  Assert(!GV.hasDLLImportStorageClass() ||
658  (GV.isDeclaration() && GV.hasExternalLinkage()) ||
659  GV.hasAvailableExternallyLinkage(),
660  "Global is marked as dllimport, but not external", &GV);
661 
662  // Visit any debug info attachments.
663  SmallVector<MDNode *, 1> MDs;
664  GV.getMetadata(LLVMContext::MD_dbg, MDs);
665  for (auto *MD : MDs) {
666  if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
667  visitDIGlobalVariableExpression(*GVE);
668  else
669  AssertDI(false, "!dbg attachment of global variable must be a "
670  "DIGlobalVariableExpression");
671  }
672 
673  if (!GV.hasInitializer()) {
674  visitGlobalValue(GV);
675  return;
676  }
677 
678  // Walk any aggregate initializers looking for bitcasts between address spaces
679  visitConstantExprsRecursively(GV.getInitializer());
680 
681  visitGlobalValue(GV);
682 }
683 
684 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
685  SmallPtrSet<const GlobalAlias*, 4> Visited;
686  Visited.insert(&GA);
687  visitAliaseeSubExpr(Visited, GA, C);
688 }
689 
690 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
691  const GlobalAlias &GA, const Constant &C) {
692  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
693  Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
694  &GA);
695 
696  if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
697  Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
698 
699  Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
700  &GA);
701  } else {
702  // Only continue verifying subexpressions of GlobalAliases.
703  // Do not recurse into global initializers.
704  return;
705  }
706  }
707 
708  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
709  visitConstantExprsRecursively(CE);
710 
711  for (const Use &U : C.operands()) {
712  Value *V = &*U;
713  if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
714  visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
715  else if (const auto *C2 = dyn_cast<Constant>(V))
716  visitAliaseeSubExpr(Visited, GA, *C2);
717  }
718 }
719 
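// Shape of the alias rules enforced below, as illustrative IR (a sketch,
// assuming typed-pointer syntax):
//
//   @g  = global i32 0
//   @ok = alias i32, i32* @g      ; fine: aliasee is a definition
//   @a  = alias i32, i32* @b      ; rejected below: aliases form a cycle
//   @b  = alias i32, i32* @a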
720 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
721  Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
722  "Alias should have private, internal, linkonce, weak, linkonce_odr, "
723  "weak_odr, or external linkage!",
724  &GA);
725  const Constant *Aliasee = GA.getAliasee();
726  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
727  Assert(GA.getType() == Aliasee->getType(),
728  "Alias and aliasee types should match!", &GA);
729 
730  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
731  "Aliasee should be either GlobalValue or ConstantExpr", &GA);
732 
733  visitAliaseeSubExpr(GA, *Aliasee);
734 
735  visitGlobalValue(GA);
736 }
737 
738 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
739  // There used to be various other llvm.dbg.* nodes, but we don't support
740  // upgrading them and we want to reserve the namespace for future uses.
741  if (NMD.getName().startswith("llvm.dbg."))
742  AssertDI(NMD.getName() == "llvm.dbg.cu",
743  "unrecognized named metadata node in the llvm.dbg namespace",
744  &NMD);
745  for (const MDNode *MD : NMD.operands()) {
746  if (NMD.getName() == "llvm.dbg.cu")
747  AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
748 
749  if (!MD)
750  continue;
751 
752  visitMDNode(*MD);
753  }
754 }
755 
756 void Verifier::visitMDNode(const MDNode &MD) {
757  // Only visit each node once. Metadata can be mutually recursive, so this
758  // avoids infinite recursion here, as well as being an optimization.
759  if (!MDNodes.insert(&MD).second)
760  return;
761 
762  switch (MD.getMetadataID()) {
763  default:
764  llvm_unreachable("Invalid MDNode subclass");
765  case Metadata::MDTupleKind:
766  break;
767 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
768  case Metadata::CLASS##Kind: \
769  visit##CLASS(cast<CLASS>(MD)); \
770  break;
771 #include "llvm/IR/Metadata.def"
772  }
773 
774  for (const Metadata *Op : MD.operands()) {
775  if (!Op)
776  continue;
777  Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
778  &MD, Op);
779  if (auto *N = dyn_cast<MDNode>(Op)) {
780  visitMDNode(*N);
781  continue;
782  }
783  if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
784  visitValueAsMetadata(*V, nullptr);
785  continue;
786  }
787  }
788 
789  // Check these last, so we diagnose problems in operands first.
790  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
791  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
792 }
793 
794 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
795  Assert(MD.getValue(), "Expected valid value", &MD);
796  Assert(!MD.getValue()->getType()->isMetadataTy(),
797  "Unexpected metadata round-trip through values", &MD, MD.getValue());
798 
799  auto *L = dyn_cast<LocalAsMetadata>(&MD);
800  if (!L)
801  return;
802 
803  Assert(F, "function-local metadata used outside a function", L);
804 
805  // If this was an instruction, bb, or argument, verify that it is in the
806  // function that we expect.
807  Function *ActualF = nullptr;
808  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
809  Assert(I->getParent(), "function-local metadata not in basic block", L, I);
810  ActualF = I->getParent()->getParent();
811  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
812  ActualF = BB->getParent();
813  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
814  ActualF = A->getParent();
815  assert(ActualF && "Unimplemented function local metadata case!");
816 
817  Assert(ActualF == F, "function-local metadata used in wrong function", L);
818 }
819 
820 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
821  Metadata *MD = MDV.getMetadata();
822  if (auto *N = dyn_cast<MDNode>(MD)) {
823  visitMDNode(*N);
824  return;
825  }
826 
827  // Only visit each node once. Metadata can be mutually recursive, so this
828  // avoids infinite recursion here, as well as being an optimization.
829  if (!MDNodes.insert(MD).second)
830  return;
831 
832  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
833  visitValueAsMetadata(*V, F);
834 }
835 
836 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
837 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
838 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
839 
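// A well-formed debug location, as checked below (illustrative, abbreviated
// metadata):
//
//   !5 = distinct !DISubprogram(name: "f", ...)       ; a DILocalScope
//   !7 = !DILocation(line: 4, column: 3, scope: !5)
//
// The scope operand must be a DILocalScope, and any inlinedAt operand must
// itself be a DILocation.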
840 void Verifier::visitDILocation(const DILocation &N) {
841  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
842  "location requires a valid scope", &N, N.getRawScope());
843  if (auto *IA = N.getRawInlinedAt())
844  AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
845  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
846  AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
847 }
848 
849 void Verifier::visitGenericDINode(const GenericDINode &N) {
850  AssertDI(N.getTag(), "invalid tag", &N);
851 }
852 
853 void Verifier::visitDIScope(const DIScope &N) {
854  if (auto *F = N.getRawFile())
855  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
856 }
857 
858 void Verifier::visitDISubrange(const DISubrange &N) {
859  AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
860  AssertDI(N.getCount() >= -1, "invalid subrange count", &N);
861 }
862 
863 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
864  AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
865 }
866 
867 void Verifier::visitDIBasicType(const DIBasicType &N) {
868  AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
869  N.getTag() == dwarf::DW_TAG_unspecified_type,
870  "invalid tag", &N);
871 }
872 
873 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
874  // Common scope checks.
875  visitDIScope(N);
876 
877  AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
878  N.getTag() == dwarf::DW_TAG_pointer_type ||
879  N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
880  N.getTag() == dwarf::DW_TAG_reference_type ||
881  N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
882  N.getTag() == dwarf::DW_TAG_const_type ||
883  N.getTag() == dwarf::DW_TAG_volatile_type ||
884  N.getTag() == dwarf::DW_TAG_restrict_type ||
885  N.getTag() == dwarf::DW_TAG_atomic_type ||
886  N.getTag() == dwarf::DW_TAG_member ||
887  N.getTag() == dwarf::DW_TAG_inheritance ||
888  N.getTag() == dwarf::DW_TAG_friend,
889  "invalid tag", &N);
890  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
891  AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
892  N.getRawExtraData());
893  }
894 
895  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
896  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
897  N.getRawBaseType());
898 
899  if (N.getDWARFAddressSpace()) {
900  AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
901  N.getTag() == dwarf::DW_TAG_reference_type,
902  "DWARF address space only applies to pointer or reference types",
903  &N);
904  }
905 }
906 
907 static bool hasConflictingReferenceFlags(unsigned Flags) {
908  return (Flags & DINode::FlagLValueReference) &&
909  (Flags & DINode::FlagRValueReference);
910 }
911 
912 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
913  auto *Params = dyn_cast<MDTuple>(&RawParams);
914  AssertDI(Params, "invalid template params", &N, &RawParams);
915  for (Metadata *Op : Params->operands()) {
916  AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
917  &N, Params, Op);
918  }
919 }
920 
921 void Verifier::visitDICompositeType(const DICompositeType &N) {
922  // Common scope checks.
923  visitDIScope(N);
924 
925  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
926  N.getTag() == dwarf::DW_TAG_structure_type ||
927  N.getTag() == dwarf::DW_TAG_union_type ||
928  N.getTag() == dwarf::DW_TAG_enumeration_type ||
929  N.getTag() == dwarf::DW_TAG_class_type,
930  "invalid tag", &N);
931 
932  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
933  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
934  N.getRawBaseType());
935 
936  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
937  "invalid composite elements", &N, N.getRawElements());
938  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
939  N.getRawVTableHolder());
940  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
941  "invalid reference flags", &N);
942  if (auto *Params = N.getRawTemplateParams())
943  visitTemplateParams(N, *Params);
944 
945  if (N.getTag() == dwarf::DW_TAG_class_type ||
946  N.getTag() == dwarf::DW_TAG_union_type) {
947  AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
948  "class/union requires a filename", &N, N.getFile());
949  }
950 }
951 
952 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
953  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
954  if (auto *Types = N.getRawTypeArray()) {
955  AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
956  for (Metadata *Ty : N.getTypeArray()->operands()) {
957  AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
958  }
959  }
960  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
961  "invalid reference flags", &N);
962 }
963 
964 void Verifier::visitDIFile(const DIFile &N) {
965  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
966  AssertDI((N.getChecksumKind() != DIFile::CSK_None ||
967  N.getChecksum().empty()), "invalid checksum kind", &N);
968 }
969 
970 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
971  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
972  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
973 
974  // Don't bother verifying the compilation directory or producer string
975  // as those could be empty.
976  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
977  N.getRawFile());
978  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
979  N.getFile());
980 
981  AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
982  "invalid emission kind", &N);
983 
984  if (auto *Array = N.getRawEnumTypes()) {
985  AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
986  for (Metadata *Op : N.getEnumTypes()->operands()) {
987  auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
988  AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
989  "invalid enum type", &N, N.getEnumTypes(), Op);
990  }
991  }
992  if (auto *Array = N.getRawRetainedTypes()) {
993  AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
994  for (Metadata *Op : N.getRetainedTypes()->operands()) {
995  AssertDI(Op && (isa<DIType>(Op) ||
996  (isa<DISubprogram>(Op) &&
997  !cast<DISubprogram>(Op)->isDefinition())),
998  "invalid retained type", &N, Op);
999  }
1000  }
1001  if (auto *Array = N.getRawGlobalVariables()) {
1002  AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1003  for (Metadata *Op : N.getGlobalVariables()->operands()) {
1004  AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1005  "invalid global variable ref", &N, Op);
1006  }
1007  }
1008  if (auto *Array = N.getRawImportedEntities()) {
1009  AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1010  for (Metadata *Op : N.getImportedEntities()->operands()) {
1011  AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1012  &N, Op);
1013  }
1014  }
1015  if (auto *Array = N.getRawMacros()) {
1016  AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1017  for (Metadata *Op : N.getMacros()->operands()) {
1018  AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1019  }
1020  }
1021  CUVisited.insert(&N);
1022 }
1023 
1024 void Verifier::visitDISubprogram(const DISubprogram &N) {
1025  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1026  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1027  if (auto *F = N.getRawFile())
1028  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1029  else
1030  AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1031  if (auto *T = N.getRawType())
1032  AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1033  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1034  N.getRawContainingType());
1035  if (auto *Params = N.getRawTemplateParams())
1036  visitTemplateParams(N, *Params);
1037  if (auto *S = N.getRawDeclaration())
1038  AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1039  "invalid subprogram declaration", &N, S);
1040  if (auto *RawVars = N.getRawVariables()) {
1041  auto *Vars = dyn_cast<MDTuple>(RawVars);
1042  AssertDI(Vars, "invalid variable list", &N, RawVars);
1043  for (Metadata *Op : Vars->operands()) {
1044  AssertDI(Op && isa<DILocalVariable>(Op), "invalid local variable", &N,
1045  Vars, Op);
1046  }
1047  }
1048  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1049  "invalid reference flags", &N);
1050 
1051  auto *Unit = N.getRawUnit();
1052  if (N.isDefinition()) {
1053  // Subprogram definitions (not part of the type hierarchy).
1054  AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1055  AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1056  AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1057  } else {
1058  // Subprogram declarations (part of the type hierarchy).
1059  AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1060  }
1061 
1062  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1063  auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1064  AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1065  for (Metadata *Op : ThrownTypes->operands())
1066  AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1067  Op);
1068  }
1069 }
1070 
1071 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1072  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1073  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1074  "invalid local scope", &N, N.getRawScope());
1075  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1076  AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1077 }
1078 
1079 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1080  visitDILexicalBlockBase(N);
1081 
1082  AssertDI(N.getLine() || !N.getColumn(),
1083  "cannot have column info without line info", &N);
1084 }
1085 
1086 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1087  visitDILexicalBlockBase(N);
1088 }
1089 
1090 void Verifier::visitDINamespace(const DINamespace &N) {
1091  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1092  if (auto *S = N.getRawScope())
1093  AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1094 }
1095 
1096 void Verifier::visitDIMacro(const DIMacro &N) {
1097  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1098  N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1099  "invalid macinfo type", &N);
1100  AssertDI(!N.getName().empty(), "anonymous macro", &N);
1101  if (!N.getValue().empty()) {
1102  assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1103  }
1104 }
1105 
1106 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1107  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1108  "invalid macinfo type", &N);
1109  if (auto *F = N.getRawFile())
1110  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1111 
1112  if (auto *Array = N.getRawElements()) {
1113  AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1114  for (Metadata *Op : N.getElements()->operands()) {
1115  AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1116  }
1117  }
1118 }
1119 
1120 void Verifier::visitDIModule(const DIModule &N) {
1121  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1122  AssertDI(!N.getName().empty(), "anonymous module", &N);
1123 }
1124 
1125 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1126  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1127 }
1128 
1129 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1130  visitDITemplateParameter(N);
1131 
1132  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1133  &N);
1134 }
1135 
1136 void Verifier::visitDITemplateValueParameter(
1137  const DITemplateValueParameter &N) {
1138  visitDITemplateParameter(N);
1139 
1140  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1141  N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1142  N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1143  "invalid tag", &N);
1144 }
1145 
1146 void Verifier::visitDIVariable(const DIVariable &N) {
1147  if (auto *S = N.getRawScope())
1148  AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1149  if (auto *F = N.getRawFile())
1150  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1151 }
1152 
1153 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1154  // Checks common to all variables.
1155  visitDIVariable(N);
1156 
1157  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1158  AssertDI(!N.getName().empty(), "missing global variable name", &N);
1159  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1160  AssertDI(N.getType(), "missing global variable type", &N);
1161  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1162  AssertDI(isa<DIDerivedType>(Member),
1163  "invalid static data member declaration", &N, Member);
1164  }
1165 }
1166 
1167 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1168  // Checks common to all variables.
1169  visitDIVariable(N);
1170 
1171  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1172  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1173  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1174  "local variable requires a valid scope", &N, N.getRawScope());
1175 }
1176 
1177 void Verifier::visitDIExpression(const DIExpression &N) {
1178  AssertDI(N.isValid(), "invalid expression", &N);
1179 }
1180 
1181 void Verifier::visitDIGlobalVariableExpression(
1182  const DIGlobalVariableExpression &GVE) {
1183  AssertDI(GVE.getVariable(), "missing variable");
1184  if (auto *Var = GVE.getVariable())
1185  visitDIGlobalVariable(*Var);
1186  if (auto *Expr = GVE.getExpression()) {
1187  visitDIExpression(*Expr);
1188  if (auto Fragment = Expr->getFragmentInfo())
1189  verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1190  }
1191 }
1192 
1193 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1194  AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1195  if (auto *T = N.getRawType())
1196  AssertDI(isType(T), "invalid type ref", &N, T);
1197  if (auto *F = N.getRawFile())
1198  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1199 }
1200 
1201 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1202  AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1203  N.getTag() == dwarf::DW_TAG_imported_declaration,
1204  "invalid tag", &N);
1205  if (auto *S = N.getRawScope())
1206  AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1207  AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1208  N.getRawEntity());
1209 }
1210 
1211 void Verifier::visitComdat(const Comdat &C) {
1212  // The Module is invalid if the GlobalValue has private linkage. Entities
1213  // with private linkage don't have entries in the symbol table.
1214  if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1215  Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1216  GV);
1217 }
1218 
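// Expected shape of the llvm.ident named metadata (illustrative):
//
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 6.0.0"}
//
// Each operand must be a node with exactly one MDString, which is what the
// loop below checks.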
1219 void Verifier::visitModuleIdents(const Module &M) {
1220  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1221  if (!Idents)
1222  return;
1223 
1224  // llvm.ident takes a list of metadata entries. Each entry has only one string.
1225  // Scan each llvm.ident entry and make sure that this requirement is met.
1226  for (const MDNode *N : Idents->operands()) {
1227  Assert(N->getNumOperands() == 1,
1228  "incorrect number of operands in llvm.ident metadata", N);
1229  Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1230  ("invalid value for llvm.ident metadata entry operand"
1231  "(the operand should be a string)"),
1232  N->getOperand(0));
1233  }
1234 }
1235 
1236 void Verifier::visitModuleFlags(const Module &M) {
1237  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1238  if (!Flags) return;
1239 
1240  // Scan each flag, and track the flags and requirements.
1241  DenseMap<const MDString*, const MDNode*> SeenIDs;
1242  SmallVector<const MDNode*, 16> Requirements;
1243  for (const MDNode *MDN : Flags->operands())
1244  visitModuleFlag(MDN, SeenIDs, Requirements);
1245 
1246  // Validate that the requirements in the module are valid.
1247  for (const MDNode *Requirement : Requirements) {
1248  const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1249  const Metadata *ReqValue = Requirement->getOperand(1);
1250 
1251  const MDNode *Op = SeenIDs.lookup(Flag);
1252  if (!Op) {
1253  CheckFailed("invalid requirement on flag, flag is not present in module",
1254  Flag);
1255  continue;
1256  }
1257 
1258  if (Op->getOperand(2) != ReqValue) {
1259  CheckFailed(("invalid requirement on flag, "
1260  "flag does not have the required value"),
1261  Flag);
1262  continue;
1263  }
1264  }
1265 }
1266 
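// A module flag entry is a three-operand MDNode: behavior, ID, value.
// Illustrative IR for the checks below (behavior 1 = Error, 2 = Warning,
// 3 = Require, 7 = Max):
//
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}
//   !1 = !{i32 7, !"PIC Level", i32 2}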
1267 void
1268 Verifier::visitModuleFlag(const MDNode *Op,
1269  DenseMap<const MDString *, const MDNode *> &SeenIDs,
1270  SmallVectorImpl<const MDNode *> &Requirements) {
1271  // Each module flag should have three arguments, the merge behavior (a
1272  // constant int), the flag ID (an MDString), and the value.
1273  Assert(Op->getNumOperands() == 3,
1274  "incorrect number of operands in module flag", Op);
1275  Module::ModFlagBehavior MFB;
1276  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1277  Assert(
1278  mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1279  "invalid behavior operand in module flag (expected constant integer)",
1280  Op->getOperand(0));
1281  Assert(false,
1282  "invalid behavior operand in module flag (unexpected constant)",
1283  Op->getOperand(0));
1284  }
1285  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1286  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1287  Op->getOperand(1));
1288 
1289  // Sanity check the values for behaviors with additional requirements.
1290  switch (MFB) {
1291  case Module::Error:
1292  case Module::Warning:
1293  case Module::Override:
1294  // These behavior types accept any value.
1295  break;
1296 
1297  case Module::Max: {
1298  Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1299  "invalid value for 'max' module flag (expected constant integer)",
1300  Op->getOperand(2));
1301  break;
1302  }
1303 
1304  case Module::Require: {
1305  // The value should itself be an MDNode with two operands, a flag ID (an
1306  // MDString), and a value.
1307  MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1308  Assert(Value && Value->getNumOperands() == 2,
1309  "invalid value for 'require' module flag (expected metadata pair)",
1310  Op->getOperand(2));
1311  Assert(isa<MDString>(Value->getOperand(0)),
1312  ("invalid value for 'require' module flag "
1313  "(first value operand should be a string)"),
1314  Value->getOperand(0));
1315 
1316  // Append it to the list of requirements, to check once all module flags are
1317  // scanned.
1318  Requirements.push_back(Value);
1319  break;
1320  }
1321 
1322  case Module::Append:
1323  case Module::AppendUnique: {
1324  // These behavior types require the operand be an MDNode.
1325  Assert(isa<MDNode>(Op->getOperand(2)),
1326  "invalid value for 'append'-type module flag "
1327  "(expected a metadata node)",
1328  Op->getOperand(2));
1329  break;
1330  }
1331  }
1332 
1333  // Unless this is a "requires" flag, check the ID is unique.
1334  if (MFB != Module::Require) {
1335  bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1336  Assert(Inserted,
1337  "module flag identifiers must be unique (or of 'require' type)", ID);
1338  }
1339 
1340  if (ID->getString() == "wchar_size") {
1341  ConstantInt *Value
1342  = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1343  Assert(Value, "wchar_size metadata requires constant integer argument");
1344  }
1345 
1346  if (ID->getString() == "Linker Options") {
1347  // If the llvm.linker.options named metadata exists, we assume that the
1348  // bitcode reader has upgraded the module flag. Otherwise the flag might
1349  // have been created by a client directly.
1350  Assert(M.getNamedMetadata("llvm.linker.options"),
1351  "'Linker Options' named metadata no longer supported");
1352  }
1353 }
1354 
1355 /// Return true if this attribute kind only applies to functions.
1356 static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1357  switch (Kind) {
1358  case Attribute::NoReturn:
1359  case Attribute::NoUnwind:
1360  case Attribute::NoInline:
1361  case Attribute::AlwaysInline:
1362  case Attribute::OptimizeForSize:
1363  case Attribute::StackProtect:
1364  case Attribute::StackProtectReq:
1365  case Attribute::StackProtectStrong:
1366  case Attribute::SafeStack:
1367  case Attribute::NoRedZone:
1368  case Attribute::NoImplicitFloat:
1369  case Attribute::Naked:
1370  case Attribute::InlineHint:
1371  case Attribute::StackAlignment:
1372  case Attribute::UWTable:
1373  case Attribute::NonLazyBind:
1374  case Attribute::ReturnsTwice:
1375  case Attribute::SanitizeAddress:
1376  case Attribute::SanitizeThread:
1377  case Attribute::SanitizeMemory:
1378  case Attribute::MinSize:
1379  case Attribute::NoDuplicate:
1380  case Attribute::Builtin:
1381  case Attribute::NoBuiltin:
1382  case Attribute::Cold:
1383  case Attribute::OptimizeNone:
1384  case Attribute::JumpTable:
1385  case Attribute::Convergent:
1386  case Attribute::ArgMemOnly:
1387  case Attribute::NoRecurse:
1388  case Attribute::InaccessibleMemOnly:
1389  case Attribute::InaccessibleMemOrArgMemOnly:
1390  case Attribute::AllocSize:
1391  case Attribute::Speculatable:
1392  case Attribute::StrictFP:
1393  return true;
1394  default:
1395  break;
1396  }
1397  return false;
1398 }
1399 
1400 /// Return true if this is a function attribute that can also appear on
1401 /// arguments.
1402 static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1403  return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1404  Kind == Attribute::ReadNone;
1405 }
1406 
1407 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1408  const Value *V) {
1409  for (Attribute A : Attrs) {
1410  if (A.isStringAttribute())
1411  continue;
1412 
1413  if (isFuncOnlyAttr(A.getKindAsEnum())) {
1414  if (!IsFunction) {
1415  CheckFailed("Attribute '" + A.getAsString() +
1416  "' only applies to functions!",
1417  V);
1418  return;
1419  }
1420  } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1421  CheckFailed("Attribute '" + A.getAsString() +
1422  "' does not apply to functions!",
1423  V);
1424  return;
1425  }
1426  }
1427 }
1428 
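// Examples of parameter-attribute combinations rejected by the checks below
// (illustrative declarations; the exact diagnostics are in the Asserts):
//
//   declare void @f(i32* byval nest %p)        ; byval/inalloca/nest/sret
//                                              ; are mutually exclusive
//   declare void @g(i32* readnone readonly %p) ; readnone and readonly clash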
1429 // VerifyParameterAttrs - Check the given attributes for an argument or return
1430 // value of the specified type. The value V is printed in error messages.
1431 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1432  const Value *V) {
1433  if (!Attrs.hasAttributes())
1434  return;
1435 
1436  verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1437 
1438  // Check for mutually incompatible attributes. Only inreg is compatible with
1439  // sret.
1440  unsigned AttrCount = 0;
1441  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1442  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1443  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1444  Attrs.hasAttribute(Attribute::InReg);
1445  AttrCount += Attrs.hasAttribute(Attribute::Nest);
1446  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
1447  "and 'sret' are incompatible!",
1448  V);
1449 
1450  Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1451  Attrs.hasAttribute(Attribute::ReadOnly)),
1452  "Attributes "
1453  "'inalloca and readonly' are incompatible!",
1454  V);
1455 
1456  Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1457  Attrs.hasAttribute(Attribute::Returned)),
1458  "Attributes "
1459  "'sret and returned' are incompatible!",
1460  V);
1461 
1462  Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1463  Attrs.hasAttribute(Attribute::SExt)),
1464  "Attributes "
1465  "'zeroext and signext' are incompatible!",
1466  V);
1467 
1468  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1469  Attrs.hasAttribute(Attribute::ReadOnly)),
1470  "Attributes "
1471  "'readnone and readonly' are incompatible!",
1472  V);
1473 
1474  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1475  Attrs.hasAttribute(Attribute::WriteOnly)),
1476  "Attributes "
1477  "'readnone and writeonly' are incompatible!",
1478  V);
1479 
1480  Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1481  Attrs.hasAttribute(Attribute::WriteOnly)),
1482  "Attributes "
1483  "'readonly and writeonly' are incompatible!",
1484  V);
1485 
1486  Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1487  Attrs.hasAttribute(Attribute::AlwaysInline)),
1488  "Attributes "
1489  "'noinline and alwaysinline' are incompatible!",
1490  V);
1491 
1492  AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1493  Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1494  "Wrong types for attribute: " +
1495  AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1496  V);
1497 
1498  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1499  SmallPtrSet<Type*, 4> Visited;
1500  if (!PTy->getElementType()->isSized(&Visited)) {
1501  Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1502  !Attrs.hasAttribute(Attribute::InAlloca),
1503  "Attributes 'byval' and 'inalloca' do not support unsized types!",
1504  V);
1505  }
1506  if (!isa<PointerType>(PTy->getElementType()))
1507  Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1508  "Attribute 'swifterror' only applies to parameters "
1509  "with pointer to pointer type!",
1510  V);
1511  } else {
1512  Assert(!Attrs.hasAttribute(Attribute::ByVal),
1513  "Attribute 'byval' only applies to parameters with pointer type!",
1514  V);
1515  Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1516  "Attribute 'swifterror' only applies to parameters "
1517  "with pointer type!",
1518  V);
1519  }
1520 }
1521 
1522 // Check parameter attributes against a function type.
1523 // The value V is printed in error messages.
1524 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1525  const Value *V) {
1526  if (Attrs.isEmpty())
1527  return;
1528 
1529  bool SawNest = false;
1530  bool SawReturned = false;
1531  bool SawSRet = false;
1532  bool SawSwiftSelf = false;
1533  bool SawSwiftError = false;
1534 
1535  // Verify return value attributes.
1536  AttributeSet RetAttrs = Attrs.getRetAttributes();
1537  Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1538  !RetAttrs.hasAttribute(Attribute::Nest) &&
1539  !RetAttrs.hasAttribute(Attribute::StructRet) &&
1540  !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1541  !RetAttrs.hasAttribute(Attribute::Returned) &&
1542  !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1543  !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1544  !RetAttrs.hasAttribute(Attribute::SwiftError)),
1545  "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
1546  "'returned', 'swiftself', and 'swifterror' do not apply to return "
1547  "values!",
1548  V);
1549  Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1550  !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1551  !RetAttrs.hasAttribute(Attribute::ReadNone)),
1552  "Attribute '" + RetAttrs.getAsString() +
1553  "' does not apply to function returns",
1554  V);
1555  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1556 
1557  // Verify parameter attributes.
1558  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1559  Type *Ty = FT->getParamType(i);
1560  AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1561 
1562  verifyParameterAttrs(ArgAttrs, Ty, V);
1563 
1564  if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1565  Assert(!SawNest, "More than one parameter has attribute nest!", V);
1566  SawNest = true;
1567  }
1568 
1569  if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1570  Assert(!SawReturned, "More than one parameter has attribute returned!",
1571  V);
1573  "Incompatible argument and return types for 'returned' attribute",
1574  V);
1575  SawReturned = true;
1576  }
1577 
1578  if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1579  Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1580  Assert(i == 0 || i == 1,
1581  "Attribute 'sret' is not on first or second parameter!", V);
1582  SawSRet = true;
1583  }
1584 
1585  if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1586  Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1587  SawSwiftSelf = true;
1588  }
1589 
1590  if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1591  Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1592  V);
1593  SawSwiftError = true;
1594  }
1595 
1596  if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1597  Assert(i == FT->getNumParams() - 1,
1598  "inalloca isn't on the last parameter!", V);
1599  }
1600  }
1601 
1602  if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1603  return;
1604 
1605  verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1606 
1607  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1608  Attrs.hasFnAttribute(Attribute::ReadOnly)),
1609  "Attributes 'readnone and readonly' are incompatible!", V);
1610 
1611  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1612  Attrs.hasFnAttribute(Attribute::WriteOnly)),
1613  "Attributes 'readnone and writeonly' are incompatible!", V);
1614 
1615  Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1616  Attrs.hasFnAttribute(Attribute::WriteOnly)),
1617  "Attributes 'readonly and writeonly' are incompatible!", V);
1618 
1619  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1620  Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1621  "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1622  "incompatible!",
1623  V);
1624 
1625  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1626  Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1627  "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1628 
1629  Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1630  Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1631  "Attributes 'noinline and alwaysinline' are incompatible!", V);
1632 
1633  if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1634  Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1635  "Attribute 'optnone' requires 'noinline'!", V);
1636 
1637  Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1638  "Attributes 'optsize and optnone' are incompatible!", V);
1639 
1640  Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1641  "Attributes 'minsize and optnone' are incompatible!", V);
1642  }
1643 
1644  if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1645  const GlobalValue *GV = cast<GlobalValue>(V);
1647  "Attribute 'jumptable' requires 'unnamed_addr'", V);
1648  }
1649 
1650  if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1651  std::pair<unsigned, Optional<unsigned>> Args =
1652  Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1653 
1654  auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1655  if (ParamNo >= FT->getNumParams()) {
1656  CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1657  return false;
1658  }
1659 
1660  if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1661  CheckFailed("'allocsize' " + Name +
1662  " argument must refer to an integer parameter",
1663  V);
1664  return false;
1665  }
1666 
1667  return true;
1668  };
1669 
1670  if (!CheckParam("element size", Args.first))
1671  return;
1672 
1673  if (Args.second && !CheckParam("number of elements", *Args.second))
1674  return;
1675  }
1676 }
1677 
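// Check function-level metadata attachments; currently only !prof
// "function_entry_count" annotations are validated here.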
1678 void Verifier::verifyFunctionMetadata(
1679  ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1680  for (const auto &Pair : MDs) {
1681  if (Pair.first == LLVMContext::MD_prof) {
1682  MDNode *MD = Pair.second;
1683  Assert(MD->getNumOperands() >= 2,
1684  "!prof annotations should have no less than 2 operands", MD);
1685 
1686  // Check first operand.
1687  Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1688  MD);
1689  Assert(isa<MDString>(MD->getOperand(0)),
1690  "expected string with name of the !prof annotation", MD);
1691  MDString *MDS = cast<MDString>(MD->getOperand(0));
1692  StringRef ProfName = MDS->getString();
1693  Assert(ProfName.equals("function_entry_count"),
1694  "first operand should be 'function_entry_count'", MD);
1695 
1696  // Check second operand.
1697  Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1698  MD);
1699  Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1700  "expected integer argument to function_entry_count", MD);
1701  }
1702  }
1703 }
1704 
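// Iteratively walk a constant and all of its operands, verifying each
// constant expression once and checking that every referenced global value
// belongs to this module.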
1705 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1706  if (!ConstantExprVisited.insert(EntryC).second)
1707  return;
1708 
1709  SmallVector<const Constant *, 16> Stack;
1710  Stack.push_back(EntryC);
1711 
1712  while (!Stack.empty()) {
1713  const Constant *C = Stack.pop_back_val();
1714 
1715  // Check this constant expression.
1716  if (const auto *CE = dyn_cast<ConstantExpr>(C))
1717  visitConstantExpr(CE);
1718 
1719  if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1720  // Global Values get visited separately, but we do need to make sure
1721  // that the global value is in the correct module
1722  Assert(GV->getParent() == &M, "Referencing global in another module!",
1723  EntryC, &M, GV, GV->getParent());
1724  continue;
1725  }
1726 
1727  // Visit all sub-expressions.
1728  for (const Use &U : C->operands()) {
1729  const auto *OpC = dyn_cast<Constant>(U);
1730  if (!OpC)
1731  continue;
1732  if (!ConstantExprVisited.insert(OpC).second)
1733  continue;
1734  Stack.push_back(OpC);
1735  }
1736  }
1737 }
1738 
1739 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1740  if (CE->getOpcode() == Instruction::BitCast)
1741  Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1742  CE->getType()),
1743  "Invalid bitcast", CE);
1744 
1745  if (CE->getOpcode() == Instruction::IntToPtr ||
1746  CE->getOpcode() == Instruction::PtrToInt) {
1747  auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1748  ? CE->getType()
1749  : CE->getOperand(0)->getType();
1750  StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1751  ? "inttoptr not supported for non-integral pointers"
1752  : "ptrtoint not supported for non-integral pointers";
1753  Assert(
1754  !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1755  Msg);
1756  }
1757 }
1758 
1759 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1760  // There shouldn't be more attribute sets than there are parameters plus the
1761  // function and return value.
1762  return Attrs.getNumAttrSets() <= Params + 2;
1763 }
1764 
1765 /// Verify that statepoint intrinsic is well formed.
1766 void Verifier::verifyStatepoint(ImmutableCallSite CS) {
1767  assert(CS.getCalledFunction() &&
1768  CS.getCalledFunction()->getIntrinsicID() ==
1769  Intrinsic::experimental_gc_statepoint);
1770 
1771  const Instruction &CI = *CS.getInstruction();
1772 
1773  Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
1774  !CS.onlyAccessesArgMemory(),
1775  "gc.statepoint must read and write all memory to preserve "
1776  "reordering restrictions required by safepoint semantics",
1777  &CI);
1778 
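// The statepoint operands are laid out as: id, number of patch bytes, call
// target, number of call arguments, flags, the call arguments themselves,
// and then length-prefixed lists of transition and deoptimization arguments;
// any remaining operands are GC values. Each length field is validated below
// before the corresponding section is inspected.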
1779  const Value *IDV = CS.getArgument(0);
1780  Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
1781  &CI);
1782 
1783  const Value *NumPatchBytesV = CS.getArgument(1);
1784  Assert(isa<ConstantInt>(NumPatchBytesV),
1785  "gc.statepoint number of patchable bytes must be a constant integer",
1786  &CI);
1787  const int64_t NumPatchBytes =
1788  cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
1789  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1790  Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
1791  "positive",
1792  &CI);
1793 
1794  const Value *Target = CS.getArgument(2);
1795  auto *PT = dyn_cast<PointerType>(Target->getType());
1796  Assert(PT && PT->getElementType()->isFunctionTy(),
1797  "gc.statepoint callee must be of function pointer type", &CI, Target);
1798  FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1799 
1800  const Value *NumCallArgsV = CS.getArgument(3);
1801  Assert(isa<ConstantInt>(NumCallArgsV),
1802  "gc.statepoint number of arguments to underlying call "
1803  "must be constant integer",
1804  &CI);
1805  const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
1806  Assert(NumCallArgs >= 0,
1807  "gc.statepoint number of arguments to underlying call "
1808  "must be positive",
1809  &CI);
1810  const int NumParams = (int)TargetFuncType->getNumParams();
1811  if (TargetFuncType->isVarArg()) {
1812  Assert(NumCallArgs >= NumParams,
1813  "gc.statepoint mismatch in number of vararg call args", &CI);
1814 
1815  // TODO: Remove this limitation
1816  Assert(TargetFuncType->getReturnType()->isVoidTy(),
1817  "gc.statepoint doesn't support wrapping non-void "
1818  "vararg functions yet",
1819  &CI);
1820  } else
1821  Assert(NumCallArgs == NumParams,
1822  "gc.statepoint mismatch in number of call args", &CI);
1823 
1824  const Value *FlagsV = CS.getArgument(4);
1825  Assert(isa<ConstantInt>(FlagsV),
1826  "gc.statepoint flags must be constant integer", &CI);
1827  const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
1828  Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
1829  "unknown flag used in gc.statepoint flags argument", &CI);
1830 
1831  // Verify that the types of the call parameter arguments match
1832  // the type of the wrapped callee.
1833  for (int i = 0; i < NumParams; i++) {
1834  Type *ParamType = TargetFuncType->getParamType(i);
1835  Type *ArgType = CS.getArgument(5 + i)->getType();
1836  Assert(ArgType == ParamType,
1837  "gc.statepoint call argument does not match wrapped "
1838  "function type",
1839  &CI);
1840  }
1841 
1842  const int EndCallArgsInx = 4 + NumCallArgs;
1843 
1844  const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
1845  Assert(isa<ConstantInt>(NumTransitionArgsV),
1846  "gc.statepoint number of transition arguments "
1847  "must be constant integer",
1848  &CI);
1849  const int NumTransitionArgs =
1850  cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
1851  Assert(NumTransitionArgs >= 0,
1852  "gc.statepoint number of transition arguments must be positive", &CI);
1853  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
1854 
1855  const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
1856  Assert(isa<ConstantInt>(NumDeoptArgsV),
1857  "gc.statepoint number of deoptimization arguments "
1858  "must be constant integer",
1859  &CI);
1860  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
1861  Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
1862  "must be positive",
1863  &CI);
1864 
1865  const int ExpectedNumArgs =
1866  7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
1867  Assert(ExpectedNumArgs <= (int)CS.arg_size(),
1868  "gc.statepoint too few arguments according to length fields", &CI);
1869 
1870  // Check that the only uses of this gc.statepoint are gc.result or
1871  // gc.relocate calls which are tied to this statepoint and thus part
1872  // of the same statepoint sequence
1873  for (const User *U : CI.users()) {
1874  const CallInst *Call = dyn_cast<const CallInst>(U);
1875  Assert(Call, "illegal use of statepoint token", &CI, U);
1876  if (!Call) continue;
1877  Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
1878  "gc.result or gc.relocate are the only value uses "
1879  "of a gc.statepoint",
1880  &CI, U);
1881  if (isa<GCResultInst>(Call)) {
1882  Assert(Call->getArgOperand(0) == &CI,
1883  "gc.result connected to wrong gc.statepoint", &CI, Call);
1884  } else if (isa<GCRelocateInst>(Call)) {
1885  Assert(Call->getArgOperand(0) == &CI,
1886  "gc.relocate connected to wrong gc.statepoint", &CI, Call);
1887  }
1888  }
1889 
1890  // Note: It is legal for a single derived pointer to be listed multiple
1891  // times. It's non-optimal, but it is legal. It can also happen after
1892  // insertion if we strip a bitcast away.
1893  // Note: It is really tempting to check that each base is relocated and
1894  // that a derived pointer is never reused as a base pointer. This turns
1895  // out to be problematic since optimizations run after safepoint insertion
1896  // can recognize equality properties that the insertion logic doesn't know
1897  // about. See example statepoint.ll in the verifier subdirectory
1898 }
1899 
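// Check that no llvm.localrecover index exceeds the number of objects that
// the corresponding function passed to llvm.localescape.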
1900 void Verifier::verifyFrameRecoverIndices() {
1901  for (auto &Counts : FrameEscapeInfo) {
1902  Function *F = Counts.first;
1903  unsigned EscapedObjectCount = Counts.second.first;
1904  unsigned MaxRecoveredIndex = Counts.second.second;
1905  Assert(MaxRecoveredIndex <= EscapedObjectCount,
1906  "all indices passed to llvm.localrecover must be less than the "
1907  "number of arguments passed ot llvm.localescape in the parent "
1908  "function",
1909  F);
1910  }
1911 }
1912 
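// Return the first non-PHI instruction of the unwind destination of the
// given terminator (an invoke, catchswitch, or cleanupret).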
1913 static Instruction *getSuccPad(TerminatorInst *Terminator) {
1914  BasicBlock *UnwindDest;
1915  if (auto *II = dyn_cast<InvokeInst>(Terminator))
1916  UnwindDest = II->getUnwindDest();
1917  else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
1918  UnwindDest = CSI->getUnwindDest();
1919  else
1920  UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
1921  return UnwindDest->getFirstNonPHI();
1922 }
1923 
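// Walk the unwind edges recorded for sibling funclets and report an error if
// the EH pads form a cycle, i.e. if they could end up handling each other's
// exceptions.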
1924 void Verifier::verifySiblingFuncletUnwinds() {
1925  SmallPtrSet<Instruction *, 8> Visited;
1926  SmallPtrSet<Instruction *, 8> Active;
1927  for (const auto &Pair : SiblingFuncletInfo) {
1928  Instruction *PredPad = Pair.first;
1929  if (Visited.count(PredPad))
1930  continue;
1931  Active.insert(PredPad);
1932  TerminatorInst *Terminator = Pair.second;
1933  do {
1934  Instruction *SuccPad = getSuccPad(Terminator);
1935  if (Active.count(SuccPad)) {
1936  // Found a cycle; report error
1937  Instruction *CyclePad = SuccPad;
1938  SmallVector<Instruction *, 8> CycleNodes;
1939  do {
1940  CycleNodes.push_back(CyclePad);
1941  TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad];
1942  if (CycleTerminator != CyclePad)
1943  CycleNodes.push_back(CycleTerminator);
1944  CyclePad = getSuccPad(CycleTerminator);
1945  } while (CyclePad != SuccPad);
1946  Assert(false, "EH pads can't handle each other's exceptions",
1947  ArrayRef<Instruction *>(CycleNodes));
1948  }
1949  // Don't re-walk a node we've already checked
1950  if (!Visited.insert(SuccPad).second)
1951  break;
1952  // Walk to this successor if it has a map entry.
1953  PredPad = SuccPad;
1954  auto TermI = SiblingFuncletInfo.find(PredPad);
1955  if (TermI == SiblingFuncletInfo.end())
1956  break;
1957  Terminator = TermI->second;
1958  Active.insert(PredPad);
1959  } while (true);
1960  // Each node only has one successor, so we've walked all the active
1961  // nodes' successors.
1962  Active.clear();
1963  }
1964 }
1965 
1966 // visitFunction - Verify that a function is ok.
1967 //
1968 void Verifier::visitFunction(const Function &F) {
1969  visitGlobalValue(F);
1970 
1971  // Check function arguments.
1972  FunctionType *FT = F.getFunctionType();
1973  unsigned NumArgs = F.arg_size();
1974 
1975  Assert(&Context == &F.getContext(),
1976  "Function context does not match Module context!", &F);
1977 
1978  Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
1979  Assert(FT->getNumParams() == NumArgs,
1980  "# formal arguments must match # of arguments for function type!", &F,
1981  FT);
1982  Assert(F.getReturnType()->isFirstClassType() ||
1983  F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
1984  "Functions cannot return aggregate values!", &F);
1985 
1986  Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
1987  "Invalid struct return type!", &F);
1988 
1989  AttributeList Attrs = F.getAttributes();
1990 
1991  Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
1992  "Attribute after last parameter!", &F);
1993 
1994  // Check function attributes.
1995  verifyFunctionAttrs(FT, Attrs, &F);
1996 
1997  // On function declarations/definitions, we do not support the builtin
1998  // attribute. We do not check this in VerifyFunctionAttrs since that is
1999  // checking for Attributes that can/can not ever be on functions.
2000  Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2001  "Attribute 'builtin' can only be applied to a callsite.", &F);
2002 
2003  // Check that this function meets the restrictions on this calling convention.
2004  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2005  // restrictions can be lifted.
2006  switch (F.getCallingConv()) {
2007  default:
2008  case CallingConv::C:
2009  break;
2010  case CallingConv::AMDGPU_KERNEL:
2011  case CallingConv::SPIR_KERNEL:
2012  Assert(F.getReturnType()->isVoidTy(),
2013  "Calling convention requires void return type", &F);
2014  LLVM_FALLTHROUGH;
2015  case CallingConv::AMDGPU_VS:
2016  case CallingConv::AMDGPU_HS:
2017  case CallingConv::AMDGPU_GS:
2018  case CallingConv::AMDGPU_PS:
2019  case CallingConv::AMDGPU_CS:
2020  Assert(!F.hasStructRetAttr(),
2021  "Calling convention does not allow sret", &F);
2022  LLVM_FALLTHROUGH;
2023  case CallingConv::Fast:
2024  case CallingConv::Cold:
2025  case CallingConv::Intel_OCL_BI:
2026  case CallingConv::PTX_Kernel:
2027  case CallingConv::PTX_Device:
2028  Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2029  "perfect forwarding!",
2030  &F);
2031  break;
2032  }
2033 
2034  bool isLLVMdotName = F.getName().size() >= 5 &&
2035  F.getName().substr(0, 5) == "llvm.";
2036 
2037  // Check that the argument values match the function type for this function...
2038  unsigned i = 0;
2039  for (const Argument &Arg : F.args()) {
2040  Assert(Arg.getType() == FT->getParamType(i),
2041  "Argument value does not match function argument type!", &Arg,
2042  FT->getParamType(i));
2044  "Function arguments must have first-class types!", &Arg);
2045  if (!isLLVMdotName) {
2047  "Function takes metadata but isn't an intrinsic", &Arg, &F);
2048  Assert(!Arg.getType()->isTokenTy(),
2049  "Function takes token but isn't an intrinsic", &Arg, &F);
2050  }
2051 
2052  // Check that swifterror argument is only used by loads and stores.
2053  if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2054  verifySwiftErrorValue(&Arg);
2055  }
2056  ++i;
2057  }
2058 
2059  if (!isLLVMdotName)
2060  Assert(!F.getReturnType()->isTokenTy(),
2061  "Functions returns a token but isn't an intrinsic", &F);
2062 
2063  // Get the function metadata attachments.
2064  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2065  F.getAllMetadata(MDs);
2066  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2067  verifyFunctionMetadata(MDs);
2068 
2069  // Check validity of the personality function
2070  if (F.hasPersonalityFn()) {
2071  auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2072  if (Per)
2073  Assert(Per->getParent() == F.getParent(),
2074  "Referencing personality function in another module!",
2075  &F, F.getParent(), Per, Per->getParent());
2076  }
2077 
2078  if (F.isMaterializable()) {
2079  // Function has a body somewhere we can't see.
2080  Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2081  MDs.empty() ? nullptr : MDs.front().second);
2082  } else if (F.isDeclaration()) {
2083  for (const auto &I : MDs) {
2084  AssertDI(I.first != LLVMContext::MD_dbg,
2085  "function declaration may not have a !dbg attachment", &F);
2086  Assert(I.first != LLVMContext::MD_prof,
2087  "function declaration may not have a !prof attachment", &F);
2088 
2089  // Verify the metadata itself.
2090  visitMDNode(*I.second);
2091  }
2092  Assert(!F.hasPersonalityFn(),
2093  "Function declaration shouldn't have a personality routine", &F);
2094  } else {
2095  // Verify that this function (which has a body) is not named "llvm.*". It
2096  // is not legal to define intrinsics.
2097  Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2098 
2099  // Check the entry node
2100  const BasicBlock *Entry = &F.getEntryBlock();
2101  Assert(pred_empty(Entry),
2102  "Entry block to function must not have predecessors!", Entry);
2103 
2104  // The address of the entry block cannot be taken, unless it is dead.
2105  if (Entry->hasAddressTaken()) {
2106  Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2107  "blockaddress may not be used with the entry block!", Entry);
2108  }
2109 
2110  unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2111  // Visit metadata attachments.
2112  for (const auto &I : MDs) {
2113  // Verify that the attachment is legal.
2114  switch (I.first) {
2115  default:
2116  break;
2117  case LLVMContext::MD_dbg: {
2118  ++NumDebugAttachments;
2119  AssertDI(NumDebugAttachments == 1,
2120  "function must have a single !dbg attachment", &F, I.second);
2121  AssertDI(isa<DISubprogram>(I.second),
2122  "function !dbg attachment must be a subprogram", &F, I.second);
2123  auto *SP = cast<DISubprogram>(I.second);
2124  const Function *&AttachedTo = DISubprogramAttachments[SP];
2125  AssertDI(!AttachedTo || AttachedTo == &F,
2126  "DISubprogram attached to more than one function", SP, &F);
2127  AttachedTo = &F;
2128  break;
2129  }
2130  case LLVMContext::MD_prof:
2131  ++NumProfAttachments;
2132  Assert(NumProfAttachments == 1,
2133  "function must have a single !prof attachment", &F, I.second);
2134  break;
2135  }
2136 
2137  // Verify the metadata itself.
2138  visitMDNode(*I.second);
2139  }
2140  }
2141 
2142  // If this function is actually an intrinsic, verify that it is only used in
2143  // direct call/invokes, never having its "address taken".
2144  // Only do this if the module is materialized, otherwise we don't have all the
2145  // uses.
2146  if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2147  const User *U;
2148  if (F.hasAddressTaken(&U))
2149  Assert(false, "Invalid user of intrinsic instruction!", U);
2150  }
2151 
2152  Assert(!F.hasDLLImportStorageClass() ||
2153  (F.isDeclaration() && F.hasExternalLinkage()) ||
2154  F.hasAvailableExternallyLinkage(),
2155  "Function is marked as dllimport, but not external.", &F);
2156 
2157  auto *N = F.getSubprogram();
2158  HasDebugInfo = (N != nullptr);
2159  if (!HasDebugInfo)
2160  return;
2161 
2162  // Check that all !dbg attachments lead back to N (or, at least, another
2163  // subprogram that describes the same function).
2164  //
2165  // FIXME: Check this incrementally while visiting !dbg attachments.
2166  // FIXME: Only check when N is the canonical subprogram for F.
2167  SmallPtrSet<const MDNode *, 32> Seen;
2168  for (auto &BB : F)
2169  for (auto &I : BB) {
2170  // Be careful about using DILocation here since we might be dealing with
2171  // broken code (this is the Verifier after all).
2172  DILocation *DL =
2173  dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
2174  if (!DL)
2175  continue;
2176  if (!Seen.insert(DL).second)
2177  continue;
2178 
2179  DILocalScope *Scope = DL->getInlinedAtScope();
2180  if (Scope && !Seen.insert(Scope).second)
2181  continue;
2182 
2183  DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2184 
2185  // Scope and SP could be the same MDNode and we don't want to skip
2186  // validation in that case
2187  if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2188  continue;
2189 
2190  // FIXME: Once N is canonical, check "SP == &N".
2191  AssertDI(SP->describes(&F),
2192  "!dbg attachment points at wrong subprogram for function", N, &F,
2193  &I, DL, Scope, SP);
2194  }
2195 }
2196 
2197 // visitBasicBlock - Verify that a basic block is well formed...
2198 //
2199 void Verifier::visitBasicBlock(BasicBlock &BB) {
2200  InstsInThisBlock.clear();
2201 
2202  // Ensure that basic blocks have terminators!
2203  Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2204 
2205  // Check constraints that this basic block imposes on all of the PHI nodes in
2206  // it.
2207  if (isa<PHINode>(BB.front())) {
2208  SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2209  SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2210  std::sort(Preds.begin(), Preds.end());
2211  PHINode *PN;
2212  for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) {
2213  // Ensure that PHI nodes have at least one entry!
2214  Assert(PN->getNumIncomingValues() != 0,
2215  "PHI nodes must have at least one entry. If the block is dead, "
2216  "the PHI should be removed!",
2217  PN);
2218  Assert(PN->getNumIncomingValues() == Preds.size(),
2219  "PHINode should have one entry for each predecessor of its "
2220  "parent basic block!",
2221  PN);
2222 
2223  // Get and sort all incoming values in the PHI node...
2224  Values.clear();
2225  Values.reserve(PN->getNumIncomingValues());
2226  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2227  Values.push_back(std::make_pair(PN->getIncomingBlock(i),
2228  PN->getIncomingValue(i)));
2229  std::sort(Values.begin(), Values.end());
2230 
2231  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2232  // Check to make sure that if there is more than one entry for a
2233  // particular basic block in this PHI node, that the incoming values are
2234  // all identical.
2235  //
2236  Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2237  Values[i].second == Values[i - 1].second,
2238  "PHI node has multiple entries for the same basic block with "
2239  "different incoming values!",
2240  PN, Values[i].first, Values[i].second, Values[i - 1].second);
2241 
2242  // Check to make sure that the predecessors and PHI node entries are
2243  // matched up.
2244  Assert(Values[i].first == Preds[i],
2245  "PHI node entries do not match predecessors!", PN,
2246  Values[i].first, Preds[i]);
2247  }
2248  }
2249  }
2250 
2251  // Check that all instructions have their parent pointers set up correctly.
2252  for (auto &I : BB)
2253  {
2254  Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2255  }
2256 }
2257 
2258 void Verifier::visitTerminatorInst(TerminatorInst &I) {
2259  // Ensure that terminators only exist at the end of the basic block.
2260  Assert(&I == I.getParent()->getTerminator(),
2261  "Terminator found in the middle of a basic block!", I.getParent());
2262  visitInstruction(I);
2263 }
2264 
2265 void Verifier::visitBranchInst(BranchInst &BI) {
2266  if (BI.isConditional()) {
2267  Assert(BI.getCondition()->getType()->isIntegerTy(1),
2268  "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2269  }
2270  visitTerminatorInst(BI);
2271 }
2272 
2273 void Verifier::visitReturnInst(ReturnInst &RI) {
2274  Function *F = RI.getParent()->getParent();
2275  unsigned N = RI.getNumOperands();
2276  if (F->getReturnType()->isVoidTy())
2277  Assert(N == 0,
2278  "Found return instr that returns non-void in Function of void "
2279  "return type!",
2280  &RI, F->getReturnType());
2281  else
2282  Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2283  "Function return type does not match operand "
2284  "type of return inst!",
2285  &RI, F->getReturnType());
2286 
2287  // Check to make sure that the return value has necessary properties for
2288  // terminators...
2289  visitTerminatorInst(RI);
2290 }
2291 
2292 void Verifier::visitSwitchInst(SwitchInst &SI) {
2293  // Check to make sure that all of the constants in the switch instruction
2294  // have the same type as the switched-on value.
2295  Type *SwitchTy = SI.getCondition()->getType();
2296  SmallPtrSet<ConstantInt*, 32> Constants;
2297  for (auto &Case : SI.cases()) {
2298  Assert(Case.getCaseValue()->getType() == SwitchTy,
2299  "Switch constants must all be same type as switch value!", &SI);
2300  Assert(Constants.insert(Case.getCaseValue()).second,
2301  "Duplicate integer as switch case", &SI, Case.getCaseValue());
2302  }
2303 
2304  visitTerminatorInst(SI);
2305 }
2306 
2307 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2308  Assert(BI.getAddress()->getType()->isPointerTy(),
2309  "Indirectbr operand must have pointer type!", &BI);
2310  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2311  Assert(BI.getDestination(i)->getType()->isLabelTy(),
2312  "Indirectbr destinations must all have pointer type!", &BI);
2313 
2314  visitTerminatorInst(BI);
2315 }
2316 
2317 void Verifier::visitSelectInst(SelectInst &SI) {
2318  Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2319  SI.getOperand(2)),
2320  "Invalid operands for select instruction!", &SI);
2321 
2322  Assert(SI.getTrueValue()->getType() == SI.getType(),
2323  "Select values must have same type as select instruction!", &SI);
2324  visitInstruction(SI);
2325 }
2326 
2327 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
2328 /// a pass, if any exist, it's an error.
2329 ///
2330 void Verifier::visitUserOp1(Instruction &I) {
2331  Assert(false, "User-defined operators should not live outside of a pass!", &I);
2332 }
2333 
2334 void Verifier::visitTruncInst(TruncInst &I) {
2335  // Get the source and destination types
2336  Type *SrcTy = I.getOperand(0)->getType();
2337  Type *DestTy = I.getType();
2338 
2339  // Get the size of the types in bits, we'll need this later
2340  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2341  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2342 
2343  Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2344  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2345  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2346  "trunc source and destination must both be a vector or neither", &I);
2347  Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2348 
2349  visitInstruction(I);
2350 }
2351 
2352 void Verifier::visitZExtInst(ZExtInst &I) {
2353  // Get the source and destination types
2354  Type *SrcTy = I.getOperand(0)->getType();
2355  Type *DestTy = I.getType();
2356 
2357  // Get the size of the types in bits, we'll need this later
2358  Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2359  Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2360  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2361  "zext source and destination must both be a vector or neither", &I);
2362  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2363  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2364 
2365  Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2366 
2367  visitInstruction(I);
2368 }
2369 
2370 void Verifier::visitSExtInst(SExtInst &I) {
2371  // Get the source and destination types
2372  Type *SrcTy = I.getOperand(0)->getType();
2373  Type *DestTy = I.getType();
2374 
2375  // Get the size of the types in bits, we'll need this later
2376  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2377  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2378 
2379  Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2380  Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2381  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2382  "sext source and destination must both be a vector or neither", &I);
2383  Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2384 
2385  visitInstruction(I);
2386 }
2387 
2388 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2389  // Get the source and destination types
2390  Type *SrcTy = I.getOperand(0)->getType();
2391  Type *DestTy = I.getType();
2392  // Get the size of the types in bits, we'll need this later
2393  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2394  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2395 
2396  Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2397  Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2398  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2399  "fptrunc source and destination must both be a vector or neither", &I);
2400  Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2401 
2402  visitInstruction(I);
2403 }
2404 
2405 void Verifier::visitFPExtInst(FPExtInst &I) {
2406  // Get the source and destination types
2407  Type *SrcTy = I.getOperand(0)->getType();
2408  Type *DestTy = I.getType();
2409 
2410  // Get the size of the types in bits, we'll need this later
2411  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2412  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2413 
2414  Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2415  Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2416  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2417  "fpext source and destination must both be a vector or neither", &I);
2418  Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2419 
2420  visitInstruction(I);
2421 }
2422 
2423 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2424  // Get the source and destination types
2425  Type *SrcTy = I.getOperand(0)->getType();
2426  Type *DestTy = I.getType();
2427 
2428  bool SrcVec = SrcTy->isVectorTy();
2429  bool DstVec = DestTy->isVectorTy();
2430 
2431  Assert(SrcVec == DstVec,
2432  "UIToFP source and dest must both be vector or scalar", &I);
2433  Assert(SrcTy->isIntOrIntVectorTy(),
2434  "UIToFP source must be integer or integer vector", &I);
2435  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2436  &I);
2437 
2438  if (SrcVec && DstVec)
2439  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2440  cast<VectorType>(DestTy)->getNumElements(),
2441  "UIToFP source and dest vector length mismatch", &I);
2442 
2443  visitInstruction(I);
2444 }
2445 
2446 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2447  // Get the source and destination types
2448  Type *SrcTy = I.getOperand(0)->getType();
2449  Type *DestTy = I.getType();
2450 
2451  bool SrcVec = SrcTy->isVectorTy();
2452  bool DstVec = DestTy->isVectorTy();
2453 
2454  Assert(SrcVec == DstVec,
2455  "SIToFP source and dest must both be vector or scalar", &I);
2456  Assert(SrcTy->isIntOrIntVectorTy(),
2457  "SIToFP source must be integer or integer vector", &I);
2458  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2459  &I);
2460 
2461  if (SrcVec && DstVec)
2462  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2463  cast<VectorType>(DestTy)->getNumElements(),
2464  "SIToFP source and dest vector length mismatch", &I);
2465 
2466  visitInstruction(I);
2467 }
2468 
2469 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2470  // Get the source and destination types
2471  Type *SrcTy = I.getOperand(0)->getType();
2472  Type *DestTy = I.getType();
2473 
2474  bool SrcVec = SrcTy->isVectorTy();
2475  bool DstVec = DestTy->isVectorTy();
2476 
2477  Assert(SrcVec == DstVec,
2478  "FPToUI source and dest must both be vector or scalar", &I);
2479  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2480  &I);
2481  Assert(DestTy->isIntOrIntVectorTy(),
2482  "FPToUI result must be integer or integer vector", &I);
2483 
2484  if (SrcVec && DstVec)
2485  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2486  cast<VectorType>(DestTy)->getNumElements(),
2487  "FPToUI source and dest vector length mismatch", &I);
2488 
2489  visitInstruction(I);
2490 }
2491 
2492 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2493  // Get the source and destination types
2494  Type *SrcTy = I.getOperand(0)->getType();
2495  Type *DestTy = I.getType();
2496 
2497  bool SrcVec = SrcTy->isVectorTy();
2498  bool DstVec = DestTy->isVectorTy();
2499 
2500  Assert(SrcVec == DstVec,
2501  "FPToSI source and dest must both be vector or scalar", &I);
2502  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2503  &I);
2504  Assert(DestTy->isIntOrIntVectorTy(),
2505  "FPToSI result must be integer or integer vector", &I);
2506 
2507  if (SrcVec && DstVec)
2508  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2509  cast<VectorType>(DestTy)->getNumElements(),
2510  "FPToSI source and dest vector length mismatch", &I);
2511 
2512  visitInstruction(I);
2513 }
2514 
2515 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2516  // Get the source and destination types
2517  Type *SrcTy = I.getOperand(0)->getType();
2518  Type *DestTy = I.getType();
2519 
2520  Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2521 
2522  if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2524  "ptrtoint not supported for non-integral pointers");
2525 
2526  Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2527  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2528  &I);
2529 
2530  if (SrcTy->isVectorTy()) {
2531  VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2532  VectorType *VDest = dyn_cast<VectorType>(DestTy);
2533  Assert(VSrc->getNumElements() == VDest->getNumElements(),
2534  "PtrToInt Vector width mismatch", &I);
2535  }
2536 
2537  visitInstruction(I);
2538 }
2539 
2540 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2541  // Get the source and destination types
2542  Type *SrcTy = I.getOperand(0)->getType();
2543  Type *DestTy = I.getType();
2544 
2545  Assert(SrcTy->isIntOrIntVectorTy(),
2546  "IntToPtr source must be an integral", &I);
2547  Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2548 
2549  if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2551  "inttoptr not supported for non-integral pointers");
2552 
2553  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2554  &I);
2555  if (SrcTy->isVectorTy()) {
2556  VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2557  VectorType *VDest = dyn_cast<VectorType>(DestTy);
2558  Assert(VSrc->getNumElements() == VDest->getNumElements(),
2559  "IntToPtr Vector width mismatch", &I);
2560  }
2561  visitInstruction(I);
2562 }
2563 
2564 void Verifier::visitBitCastInst(BitCastInst &I) {
2565  Assert(
2566  CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2567  "Invalid bitcast", &I);
2568  visitInstruction(I);
2569 }
2570 
2571 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2572  Type *SrcTy = I.getOperand(0)->getType();
2573  Type *DestTy = I.getType();
2574 
2575  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2576  &I);
2577  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2578  &I);
2580  "AddrSpaceCast must be between different address spaces", &I);
2581  if (SrcTy->isVectorTy())
2582  Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2583  "AddrSpaceCast vector pointer number of elements mismatch", &I);
2584  visitInstruction(I);
2585 }
2586 
2587 /// visitPHINode - Ensure that a PHI node is well formed.
2588 ///
2589 void Verifier::visitPHINode(PHINode &PN) {
2590  // Ensure that the PHI nodes are all grouped together at the top of the block.
2591  // This can be tested by checking whether the instruction before this is
2592  // either nonexistent (because this is begin()) or is a PHI node. If not,
2593  // then there is some other instruction before a PHI.
2594  Assert(&PN == &PN.getParent()->front() ||
2595  isa<PHINode>(--BasicBlock::iterator(&PN)),
2596  "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2597 
2598  // Check that a PHI doesn't yield a Token.
2599  Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2600 
2601  // Check that all of the values of the PHI node have the same type as the
2602  // result, and that the incoming blocks are really basic blocks.
2603  for (Value *IncValue : PN.incoming_values()) {
2604  Assert(PN.getType() == IncValue->getType(),
2605  "PHI node operands are not the same type as the result!", &PN);
2606  }
2607 
2608  // All other PHI node constraints are checked in the visitBasicBlock method.
2609 
2610  visitInstruction(PN);
2611 }
2612 
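// Checks shared by call and invoke instructions: callee type, argument count
// and types, attributes, inalloca and swifterror arguments, operand bundles,
// and debug locations on inlinable calls.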
2613 void Verifier::verifyCallSite(CallSite CS) {
2614  Instruction *I = CS.getInstruction();
2615 
2617  "Called function must be a pointer!", I);
2618  PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
2619 
2620  Assert(FPTy->getElementType()->isFunctionTy(),
2621  "Called function is not pointer to function type!", I);
2622 
2623  Assert(FPTy->getElementType() == CS.getFunctionType(),
2624  "Called function is not the same type as the call!", I);
2625 
2626  FunctionType *FTy = CS.getFunctionType();
2627 
2628  // Verify that the correct number of arguments are being passed
2629  if (FTy->isVarArg())
2630  Assert(CS.arg_size() >= FTy->getNumParams(),
2631  "Called function requires more parameters than were provided!", I);
2632  else
2633  Assert(CS.arg_size() == FTy->getNumParams(),
2634  "Incorrect number of arguments passed to called function!", I);
2635 
2636  // Verify that all arguments to the call match the function type.
2637  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2638  Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
2639  "Call parameter type does not match function signature!",
2640  CS.getArgument(i), FTy->getParamType(i), I);
2641 
2642  AttributeList Attrs = CS.getAttributes();
2643 
2644  Assert(verifyAttributeCount(Attrs, CS.arg_size()),
2645  "Attribute after last parameter!", I);
2646 
2647  if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2648  // Don't allow speculatable on call sites, unless the underlying function
2649  // declaration is also speculatable.
2650  Function *Callee
2651  = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2652  Assert(Callee && Callee->isSpeculatable(),
2653  "speculatable attribute may not apply to call sites", I);
2654  }
2655 
2656  // Verify call attributes.
2657  verifyFunctionAttrs(FTy, Attrs, I);
2658 
2659  // Conservatively check the inalloca argument.
2660  // We have a bug if we can find that there is an underlying alloca without
2661  // inalloca.
2662  if (CS.hasInAllocaArgument()) {
2663  Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
2664  if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2665  Assert(AI->isUsedWithInAlloca(),
2666  "inalloca argument for call has mismatched alloca", AI, I);
2667  }
2668 
2669  // For each argument of the callsite, if it has the swifterror argument,
2670  // make sure the underlying alloca/parameter it comes from has a swifterror as
2671  // well.
2672  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2673  if (CS.paramHasAttr(i, Attribute::SwiftError)) {
2674  Value *SwiftErrorArg = CS.getArgument(i);
2675  if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2676  Assert(AI->isSwiftError(),
2677  "swifterror argument for call has mismatched alloca", AI, I);
2678  continue;
2679  }
2680  auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2681  Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
2682  Assert(ArgI->hasSwiftErrorAttr(),
2683  "swifterror argument for call has mismatched parameter", ArgI, I);
2684  }
2685 
2686  if (FTy->isVarArg()) {
2687  // FIXME? is 'nest' even legal here?
2688  bool SawNest = false;
2689  bool SawReturned = false;
2690 
2691  for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2692  if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2693  SawNest = true;
2694  if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2695  SawReturned = true;
2696  }
2697 
2698  // Check attributes on the varargs part.
2699  for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
2700  Type *Ty = CS.getArgument(Idx)->getType();
2701  AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2702  verifyParameterAttrs(ArgAttrs, Ty, I);
2703 
2704  if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2705  Assert(!SawNest, "More than one parameter has attribute nest!", I);
2706  SawNest = true;
2707  }
2708 
2709  if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2710  Assert(!SawReturned, "More than one parameter has attribute returned!",
2711  I);
2712  Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2713  "Incompatible argument and return types for 'returned' "
2714  "attribute",
2715  I);
2716  SawReturned = true;
2717  }
2718 
2719  Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2720  "Attribute 'sret' cannot be used for vararg call arguments!", I);
2721 
2722  if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2723  Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
2724  I);
2725  }
2726  }
2727 
2728  // Verify that there's no metadata unless it's a direct call to an intrinsic.
2729  if (CS.getCalledFunction() == nullptr ||
2730  !CS.getCalledFunction()->getName().startswith("llvm.")) {
2731  for (Type *ParamTy : FTy->params()) {
2732  Assert(!ParamTy->isMetadataTy(),
2733  "Function has metadata parameter but isn't an intrinsic", I);
2734  Assert(!ParamTy->isTokenTy(),
2735  "Function has token parameter but isn't an intrinsic", I);
2736  }
2737  }
2738 
2739  // Verify that indirect calls don't return tokens.
2740  if (CS.getCalledFunction() == nullptr)
2741  Assert(!FTy->getReturnType()->isTokenTy(),
2742  "Return type cannot be token for indirect call!");
2743 
2744  if (Function *F = CS.getCalledFunction())
2745  if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
2746  visitIntrinsicCallSite(ID, CS);
2747 
2748  // Verify that a callsite has at most one "deopt", at most one "funclet" and
2749  // at most one "gc-transition" operand bundle.
2750  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
2751  FoundGCTransitionBundle = false;
2752  for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
2753  OperandBundleUse BU = CS.getOperandBundleAt(i);
2754  uint32_t Tag = BU.getTagID();
2755  if (Tag == LLVMContext::OB_deopt) {
2756  Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
2757  FoundDeoptBundle = true;
2758  } else if (Tag == LLVMContext::OB_gc_transition) {
2759  Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
2760  I);
2761  FoundGCTransitionBundle = true;
2762  } else if (Tag == LLVMContext::OB_funclet) {
2763  Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
2764  FoundFuncletBundle = true;
2765  Assert(BU.Inputs.size() == 1,
2766  "Expected exactly one funclet bundle operand", I);
2767  Assert(isa<FuncletPadInst>(BU.Inputs.front()),
2768  "Funclet bundle operands should correspond to a FuncletPadInst",
2769  I);
2770  }
2771  }
2772 
2773  // Verify that each inlinable callsite of a debug-info-bearing function in a
2774  // debug-info-bearing function has a debug location attached to it. Failure to
2775  // do so causes assertion failures when the inliner sets up inline scope info.
2776  if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
2777  CS.getCalledFunction()->getSubprogram())
2778  AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
2779  "debug info must have a !dbg location",
2780  I);
2781 
2782  visitInstruction(*I);
2783 }
2784 
2785 /// Two types are "congruent" if they are identical, or if they are both pointer
2786 /// types with different pointee types and the same address space.
2787 static bool isTypeCongruent(Type *L, Type *R) {
2788  if (L == R)
2789  return true;
2790  auto *PL = dyn_cast<PointerType>(L);
2791  auto *PR = dyn_cast<PointerType>(R);
2792  if (!PL || !PR)
2793  return false;
2794  return PL->getAddressSpace() == PR->getAddressSpace();
2795 }
2796 
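// Collect the ABI-affecting attributes (sret, byval, inalloca, inreg,
// returned, swiftself, swifterror, and alignment) of parameter I so that the
// caller's and callee's parameters can be compared for musttail calls.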
2797 static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
2798  static const Attribute::AttrKind ABIAttrs[] = {
2799  Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
2800  Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
2801  Attribute::SwiftError};
2802  AttrBuilder Copy;
2803  for (auto AK : ABIAttrs) {
2804  if (Attrs.hasParamAttribute(I, AK))
2805  Copy.addAttribute(AK);
2806  }
2807  if (Attrs.hasParamAttribute(I, Attribute::Alignment))
2808  Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
2809  return Copy;
2810 }
2811 
2812 void Verifier::verifyMustTailCall(CallInst &CI) {
2813  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
2814 
2815  // - The caller and callee prototypes must match. Pointer types of
2816  // parameters or return types may differ in pointee type, but not
2817  // address space.
2818  Function *F = CI.getParent()->getParent();
2819  FunctionType *CallerTy = F->getFunctionType();
2820  FunctionType *CalleeTy = CI.getFunctionType();
2821  Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
2822  "cannot guarantee tail call due to mismatched parameter counts", &CI);
2823  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
2824  "cannot guarantee tail call due to mismatched varargs", &CI);
2825  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
2826  "cannot guarantee tail call due to mismatched return types", &CI);
2827  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2828  Assert(
2829  isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
2830  "cannot guarantee tail call due to mismatched parameter types", &CI);
2831  }
2832 
2833  // - The calling conventions of the caller and callee must match.
2834  Assert(F->getCallingConv() == CI.getCallingConv(),
2835  "cannot guarantee tail call due to mismatched calling conv", &CI);
2836 
2837  // - All ABI-impacting function attributes, such as sret, byval, inreg,
2838  // returned, and inalloca, must match.
2839  AttributeList CallerAttrs = F->getAttributes();
2840  AttributeList CalleeAttrs = CI.getAttributes();
2841  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2842  AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
2843  AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
2844  Assert(CallerABIAttrs == CalleeABIAttrs,
2845  "cannot guarantee tail call due to mismatched ABI impacting "
2846  "function attributes",
2847  &CI, CI.getOperand(I));
2848  }
2849 
2850  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
2851  // or a pointer bitcast followed by a ret instruction.
2852  // - The ret instruction must return the (possibly bitcasted) value
2853  // produced by the call or void.
2854  Value *RetVal = &CI;
2855  Instruction *Next = CI.getNextNode();
2856 
2857  // Handle the optional bitcast.
2858  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
2859  Assert(BI->getOperand(0) == RetVal,
2860  "bitcast following musttail call must use the call", BI);
2861  RetVal = BI;
2862  Next = BI->getNextNode();
2863  }
2864 
2865  // Check the return.
2866  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
2867  Assert(Ret, "musttail call must be precede a ret with an optional bitcast",
2868  &CI);
2869  Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
2870  "musttail call result must be returned", Ret);
2871 }
2872 
2873 void Verifier::visitCallInst(CallInst &CI) {
2874  verifyCallSite(&CI);
2875 
2876  if (CI.isMustTailCall())
2877  verifyMustTailCall(CI);
2878 }
2879 
2880 void Verifier::visitInvokeInst(InvokeInst &II) {
2881  verifyCallSite(&II);
2882 
2883  // Verify that the first non-PHI instruction of the unwind destination is an
2884  // exception handling instruction.
2885  Assert(
2886  II.getUnwindDest()->isEHPad(),
2887  "The unwind destination does not have an exception handling instruction!",
2888  &II);
2889 
2890  visitTerminatorInst(II);
2891 }
2892 
2893 /// visitBinaryOperator - Check that both arguments to the binary operator are
2894 /// of the same type!
2895 ///
2896 void Verifier::visitBinaryOperator(BinaryOperator &B) {
2897  Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
2898  "Both operands to a binary operator are not of the same type!", &B);
2899 
2900  switch (B.getOpcode()) {
2901  // Check that integer arithmetic operators are only used with
2902  // integral operands.
2903  case Instruction::Add:
2904  case Instruction::Sub:
2905  case Instruction::Mul:
2906  case Instruction::SDiv:
2907  case Instruction::UDiv:
2908  case Instruction::SRem:
2909  case Instruction::URem:
2911  "Integer arithmetic operators only work with integral types!", &B);
2912  Assert(B.getType() == B.getOperand(0)->getType(),
2913  "Integer arithmetic operators must have same type "
2914  "for operands and result!",
2915  &B);
2916  break;
2917  // Check that floating-point arithmetic operators are only used with
2918  // floating-point operands.
2919  case Instruction::FAdd:
2920  case Instruction::FSub:
2921  case Instruction::FMul:
2922  case Instruction::FDiv:
2923  case Instruction::FRem:
2925  "Floating-point arithmetic operators only work with "
2926  "floating-point types!",
2927  &B);
2928  Assert(B.getType() == B.getOperand(0)->getType(),
2929  "Floating-point arithmetic operators must have same type "
2930  "for operands and result!",
2931  &B);
2932  break;
2933  // Check that logical operators are only used with integral operands.
2934  case Instruction::And:
2935  case Instruction::Or:
2936  case Instruction::Xor:
2938  "Logical operators only work with integral types!", &B);
2939  Assert(B.getType() == B.getOperand(0)->getType(),
2940  "Logical operators must have same type for operands and result!",
2941  &B);
2942  break;
2943  case Instruction::Shl:
2944  case Instruction::LShr:
2945  case Instruction::AShr:
2947  "Shifts only work with integral types!", &B);
2948  Assert(B.getType() == B.getOperand(0)->getType(),
2949  "Shift return type must be same as operands!", &B);
2950  break;
2951  default:
2952  llvm_unreachable("Unknown BinaryOperator opcode!");
2953  }
2954 
2955  visitInstruction(B);
2956 }
2957 
2958 void Verifier::visitICmpInst(ICmpInst &IC) {
2959  // Check that the operands are the same type
2960  Type *Op0Ty = IC.getOperand(0)->getType();
2961  Type *Op1Ty = IC.getOperand(1)->getType();
2962  Assert(Op0Ty == Op1Ty,
2963  "Both operands to ICmp instruction are not of the same type!", &IC);
2964  // Check that the operands are the right type
2965  Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
2966  "Invalid operand types for ICmp instruction", &IC);
2967  // Check that the predicate is valid.
2968  Assert(IC.isIntPredicate(),
2969  "Invalid predicate in ICmp instruction!", &IC);
2970 
2971  visitInstruction(IC);
2972 }
2973 
2974 void Verifier::visitFCmpInst(FCmpInst &FC) {
2975  // Check that the operands are the same type
2976  Type *Op0Ty = FC.getOperand(0)->getType();
2977  Type *Op1Ty = FC.getOperand(1)->getType();
2978  Assert(Op0Ty == Op1Ty,
2979  "Both operands to FCmp instruction are not of the same type!", &FC);
2980  // Check that the operands are the right type
2981  Assert(Op0Ty->isFPOrFPVectorTy(),
2982  "Invalid operand types for FCmp instruction", &FC);
2983  // Check that the predicate is valid.
2984  Assert(FC.isFPPredicate(),
2985  "Invalid predicate in FCmp instruction!", &FC);
2986 
2987  visitInstruction(FC);
2988 }
2989 
2990 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
2991  Assert(
2993  "Invalid extractelement operands!", &EI);
2994  visitInstruction(EI);
2995 }
2996 
2997 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
2998  Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
2999  IE.getOperand(2)),
3000  "Invalid insertelement operands!", &IE);
3001  visitInstruction(IE);
3002 }
3003 
3004 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3005  Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3006  SV.getOperand(2)),
3007  "Invalid shufflevector operands!", &SV);
3008  visitInstruction(SV);
3009 }
3010 
3011 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3012  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3013 
3014  Assert(isa<PointerType>(TargetTy),
3015  "GEP base pointer is not a vector or a vector of pointers", &GEP);
3016  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3017  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3018  Type *ElTy =
3019  GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3020  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3021 
3022  Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3023  GEP.getResultElementType() == ElTy,
3024  "GEP is not of right type for indices!", &GEP, ElTy);
3025 
3026  if (GEP.getType()->isVectorTy()) {
3027  // Additional checks for vector GEPs.
3028  unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3029  if (GEP.getPointerOperandType()->isVectorTy())
3030  Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3031  "Vector GEP result width doesn't match operand's", &GEP);
3032  for (Value *Idx : Idxs) {
3033  Type *IndexTy = Idx->getType();
3034  if (IndexTy->isVectorTy()) {
3035  unsigned IndexWidth = IndexTy->getVectorNumElements();
3036  Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3037  }
3038  Assert(IndexTy->isIntOrIntVectorTy(),
3039  "All GEP indices should be of integer type");
3040  }
3041  }
3042  visitInstruction(GEP);
3043 }
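// Illustrative example (not part of the original source): for a vector GEP such as
//   %p = getelementptr i32, <4 x i32*> %base, <4 x i64> %idx
// the result is <4 x i32*>, and every vector index must have the same number of
// elements as the result; a <2 x i64> index here would fail the width check above.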
3044 
3045 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3046  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3047 }
3048 
3049 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3050  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3051  "precondition violation");
3052 
3053  unsigned NumOperands = Range->getNumOperands();
3054  Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3055  unsigned NumRanges = NumOperands / 2;
3056  Assert(NumRanges >= 1, "It should have at least one range!", Range);
3057 
3058  ConstantRange LastRange(1); // Dummy initial value
3059  for (unsigned i = 0; i < NumRanges; ++i) {
3060  ConstantInt *Low =
3061  mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3062  Assert(Low, "The lower limit must be an integer!", Low);
3063  ConstantInt *High =
3064  mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3065  Assert(High, "The upper limit must be an integer!", High);
3066  Assert(High->getType() == Low->getType() && High->getType() == Ty,
3067  "Range types must match instruction type!", &I);
3068 
3069  APInt HighV = High->getValue();
3070  APInt LowV = Low->getValue();
3071  ConstantRange CurRange(LowV, HighV);
3072  Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3073  "Range must not be empty!", Range);
3074  if (i != 0) {
3075  Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3076  "Intervals are overlapping", Range);
3077  Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3078  Range);
3079  Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3080  Range);
3081  }
3082  LastRange = ConstantRange(LowV, HighV);
3083  }
3084  if (NumRanges > 2) {
3085  APInt FirstLow =
3086  mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3087  APInt FirstHigh =
3088  mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3089  ConstantRange FirstRange(FirstLow, FirstHigh);
3090  Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3091  "Intervals are overlapping", Range);
3092  Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3093  Range);
3094  }
3095 }
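// Illustrative example (not part of the original source): well-formed !range
// metadata is a list of [Low, High) pairs that are ordered, non-overlapping, and
// non-contiguous, e.g. on an i8 load:
//   %v = load i8, i8* %p, !range !0
//   !0 = !{i8 0, i8 2, i8 4, i8 6}
// Adjacent intervals such as [0, 2) and [2, 6) must instead be merged into one.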
3096 
3097 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3098  unsigned Size = DL.getTypeSizeInBits(Ty);
3099  Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3100  Assert(!(Size & (Size - 1)),
3101  "atomic memory access' operand must have a power-of-two size", Ty, I);
3102 }
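// Note: (Size & (Size - 1)) == 0 is the usual bit trick for "Size is a power of
// two"; combined with the Size >= 8 check it restricts atomic accesses to whole,
// power-of-two numbers of bytes.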
3103 
3104 void Verifier::visitLoadInst(LoadInst &LI) {
3105  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3106  Assert(PTy, "Load operand must be a pointer.", &LI);
3107  Type *ElTy = LI.getType();
3108  Assert(LI.getAlignment() <= Value::MaximumAlignment,
3109  "huge alignment values are unsupported", &LI);
3110  Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3111  if (LI.isAtomic()) {
3112  Assert(LI.getOrdering() != AtomicOrdering::Release &&
3113  LI.getOrdering() != AtomicOrdering::AcquireRelease,
3114  "Load cannot have Release ordering", &LI);
3115  Assert(LI.getAlignment() != 0,
3116  "Atomic load must specify explicit alignment", &LI);
3117  Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
3118  ElTy->isFloatingPointTy(),
3119  "atomic load operand must have integer, pointer, or floating point "
3120  "type!",
3121  ElTy, &LI);
3122  checkAtomicMemAccessSize(ElTy, &LI);
3123  } else {
3124  Assert(LI.getSyncScopeID() == SyncScope::System,
3125  "Non-atomic load cannot have SynchronizationScope specified", &LI);
3126  }
3127 
3128  visitInstruction(LI);
3129 }
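// Illustrative example (not part of the original source): an atomic load must
// carry an explicit alignment and must not use release semantics, e.g.
//   %v = load atomic i32, i32* %p acquire, align 4
// is accepted, while "load atomic i32, i32* %p release, align 4" is rejected above.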
3130 
3131 void Verifier::visitStoreInst(StoreInst &SI) {
3132  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3133  Assert(PTy, "Store operand must be a pointer.", &SI);
3134  Type *ElTy = PTy->getElementType();
3135  Assert(ElTy == SI.getOperand(0)->getType(),
3136  "Stored value type does not match pointer operand type!", &SI, ElTy);
3137  Assert(SI.getAlignment() <= Value::MaximumAlignment,
3138  "huge alignment values are unsupported", &SI);
3139  Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3140  if (SI.isAtomic()) {
3141  Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3142  SI.getOrdering() != AtomicOrdering::AcquireRelease,
3143  "Store cannot have Acquire ordering", &SI);
3144  Assert(SI.getAlignment() != 0,
3145  "Atomic store must specify explicit alignment", &SI);
3146  Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
3147  ElTy->isFloatingPointTy(),
3148  "atomic store operand must have integer, pointer, or floating point "
3149  "type!",
3150  ElTy, &SI);
3151  checkAtomicMemAccessSize(ElTy, &SI);
3152  } else {
3153  Assert(SI.getSyncScopeID() == SyncScope::System,
3154  "Non-atomic store cannot have SynchronizationScope specified", &SI);
3155  }
3156  visitInstruction(SI);
3157 }
3158 
3159 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
3160 void Verifier::verifySwiftErrorCallSite(CallSite CS,
3161  const Value *SwiftErrorVal) {
3162  unsigned Idx = 0;
3163  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
3164  I != E; ++I, ++Idx) {
3165  if (*I == SwiftErrorVal) {
3166  Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
3167  "swifterror value when used in a callsite should be marked "
3168  "with swifterror attribute",
3169  SwiftErrorVal, CS);
3170  }
3171  }
3172 }
3173 
3174 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3175  // Check that swifterror value is only used by loads, stores, or as
3176  // a swifterror argument.
3177  for (const User *U : SwiftErrorVal->users()) {
3178  Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3179  isa<InvokeInst>(U),
3180  "swifterror value can only be loaded and stored from, or "
3181  "as a swifterror argument!",
3182  SwiftErrorVal, U);
3183  // If it is used by a store, check it is the second operand.
3184  if (auto StoreI = dyn_cast<StoreInst>(U))
3185  Assert(StoreI->getOperand(1) == SwiftErrorVal,
3186  "swifterror value should be the second operand when used "
3187  "by stores", SwiftErrorVal, U);
3188  if (auto CallI = dyn_cast<CallInst>(U))
3189  verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
3190  if (auto II = dyn_cast<InvokeInst>(U))
3191  verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
3192  }
3193 }
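// Illustrative example (not part of the original source): a swifterror value may
// only be loaded, stored to as the pointer operand, or passed to a call parameter
// that is itself marked swifterror, e.g.
//   %err = alloca swifterror %swift.error*, align 8
//   call void @f(%swift.error** swifterror %err)
// where @f is a hypothetical callee declared with a swifterror parameter.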
3194 
3195 void Verifier::visitAllocaInst(AllocaInst &AI) {
3196  SmallPtrSet<Type*, 4> Visited;
3197  PointerType *PTy = AI.getType();
3198  // TODO: Relax this restriction?
3199  Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3200  "Allocation instruction pointer not in the stack address space!",
3201  &AI);
3202  Assert(AI.getAllocatedType()->isSized(&Visited),
3203  "Cannot allocate unsized type", &AI);
3204  Assert(AI.getArraySize()->getType()->isIntegerTy(),
3205  "Alloca array size must have integer type", &AI);
3206  Assert(AI.getAlignment() <= Value::MaximumAlignment,
3207  "huge alignment values are unsupported", &AI);
3208 
3209  if (AI.isSwiftError()) {
3210  verifySwiftErrorValue(&AI);
3211  }
3212 
3213  visitInstruction(AI);
3214 }
3215 
3216 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3217 
3218  // FIXME: more conditions???
3219  Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3220  "cmpxchg instructions must be atomic.", &CXI);
3221  Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3222  "cmpxchg instructions must be atomic.", &CXI);
3223  Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3224  "cmpxchg instructions cannot be unordered.", &CXI);
3225  Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3226  "cmpxchg instructions cannot be unordered.", &CXI);
3227  Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3228  "cmpxchg instructions failure argument shall be no stronger than the "
3229  "success argument",
3230  &CXI);
3231  Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3232  CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3233  "cmpxchg failure ordering cannot include release semantics", &CXI);
3234 
3235  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3236  Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3237  Type *ElTy = PTy->getElementType();
3238  Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(),
3239  "cmpxchg operand must have integer or pointer type",
3240  ElTy, &CXI);
3241  checkAtomicMemAccessSize(ElTy, &CXI);
3242  Assert(ElTy == CXI.getOperand(1)->getType(),
3243  "Expected value type does not match pointer operand type!", &CXI,
3244  ElTy);
3245  Assert(ElTy == CXI.getOperand(2)->getType(),
3246  "Stored value type does not match pointer operand type!", &CXI, ElTy);
3247  visitInstruction(CXI);
3248 }
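// Illustrative example (not part of the original source): the failure ordering of
// a cmpxchg may not be stronger than its success ordering and may not include
// release semantics, so
//   %r = cmpxchg i32* %p, i32 %old, i32 %new seq_cst monotonic
// is accepted, while "monotonic seq_cst" or "seq_cst release" would be rejected.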
3249 
3250 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3251  Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3252  "atomicrmw instructions must be atomic.", &RMWI);
3253  Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3254  "atomicrmw instructions cannot be unordered.", &RMWI);
3255  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3256  Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3257  Type *ElTy = PTy->getElementType();
3258  Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!",
3259  &RMWI, ElTy);
3260  checkAtomicMemAccessSize(ElTy, &RMWI);
3261  Assert(ElTy == RMWI.getOperand(1)->getType(),
3262  "Argument value type does not match pointer operand type!", &RMWI,
3263  ElTy);
3264  Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
3265  RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
3266  "Invalid binary operation!", &RMWI);
3267  visitInstruction(RMWI);
3268 }
3269 
3270 void Verifier::visitFenceInst(FenceInst &FI) {
3271  const AtomicOrdering Ordering = FI.getOrdering();
3272  Assert(Ordering == AtomicOrdering::Acquire ||
3273  Ordering == AtomicOrdering::Release ||
3274  Ordering == AtomicOrdering::AcquireRelease ||
3275  Ordering == AtomicOrdering::SequentiallyConsistent,
3276  "fence instructions may only have acquire, release, acq_rel, or "
3277  "seq_cst ordering.",
3278  &FI);
3279  visitInstruction(FI);
3280 }
3281 
3282 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3283  Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3284  EVI.getIndices()) == EVI.getType(),
3285  "Invalid ExtractValueInst operands!", &EVI);
3286 
3287  visitInstruction(EVI);
3288 }
3289 
3290 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3291  Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3292  IVI.getIndices()) ==
3293  IVI.getOperand(1)->getType(),
3294  "Invalid InsertValueInst operands!", &IVI);
3295 
3296  visitInstruction(IVI);
3297 }
3298 
3299 static Value *getParentPad(Value *EHPad) {
3300  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3301  return FPI->getParentPad();
3302 
3303  return cast<CatchSwitchInst>(EHPad)->getParentPad();
3304 }
3305 
3306 void Verifier::visitEHPadPredecessors(Instruction &I) {
3307  assert(I.isEHPad());
3308 
3309  BasicBlock *BB = I.getParent();
3310  Function *F = BB->getParent();
3311 
3312  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3313 
3314  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3315  // The landingpad instruction defines its parent as a landing pad block. The
3316  // landing pad block may be branched to only by the unwind edge of an
3317  // invoke.
3318  for (BasicBlock *PredBB : predecessors(BB)) {
3319  const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3320  Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3321  "Block containing LandingPadInst must be jumped to "
3322  "only by the unwind edge of an invoke.",
3323  LPI);
3324  }
3325  return;
3326  }
3327  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3328  if (!pred_empty(BB))
3329  Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3330  "Block containing CatchPadInst must be jumped to "
3331  "only by its catchswitch.",
3332  CPI);
3333  Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3334  "Catchswitch cannot unwind to one of its catchpads",
3335  CPI->getCatchSwitch(), CPI);
3336  return;
3337  }
3338 
3339  // Verify that each pred has a legal terminator with a legal to/from EH
3340  // pad relationship.
3341  Instruction *ToPad = &I;
3342  Value *ToPadParent = getParentPad(ToPad);
3343  for (BasicBlock *PredBB : predecessors(BB)) {
3344  TerminatorInst *TI = PredBB->getTerminator();
3345  Value *FromPad;
3346  if (auto *II = dyn_cast<InvokeInst>(TI)) {
3347  Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3348  "EH pad must be jumped to via an unwind edge", ToPad, II);
3349  if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3350  FromPad = Bundle->Inputs[0];
3351  else
3352  FromPad = ConstantTokenNone::get(II->getContext());
3353  } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3354  FromPad = CRI->getOperand(0);
3355  Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3356  } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3357  FromPad = CSI;
3358  } else {
3359  Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3360  }
3361 
3362  // The edge may exit from zero or more nested pads.
3363  SmallSet<Value *, 8> Seen;
3364  for (;; FromPad = getParentPad(FromPad)) {
3365  Assert(FromPad != ToPad,
3366  "EH pad cannot handle exceptions raised within it", FromPad, TI);
3367  if (FromPad == ToPadParent) {
3368  // This is a legal unwind edge.
3369  break;
3370  }
3371  Assert(!isa<ConstantTokenNone>(FromPad),
3372  "A single unwind edge may only enter one EH pad", TI);
3373  Assert(Seen.insert(FromPad).second,
3374  "EH pad jumps through a cycle of pads", FromPad);
3375  }
3376  }
3377 }
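// Summary of the loop above: an unwind edge into this pad is legal only if
// walking FromPad's parent-pad chain reaches ToPad's parent before hitting ToPad
// itself, the "unwind to caller" token, or a pad that was already seen (a cycle).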
3378 
3379 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3380  // The landingpad instruction is ill-formed if it doesn't have any clauses and
3381  // isn't a cleanup.
3382  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3383  "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3384 
3385  visitEHPadPredecessors(LPI);
3386 
3387  if (!LandingPadResultTy)
3388  LandingPadResultTy = LPI.getType();
3389  else
3390  Assert(LandingPadResultTy == LPI.getType(),
3391  "The landingpad instruction should have a consistent result type "
3392  "inside a function.",
3393  &LPI);
3394 
3395  Function *F = LPI.getParent()->getParent();
3396  Assert(F->hasPersonalityFn(),
3397  "LandingPadInst needs to be in a function with a personality.", &LPI);
3398 
3399  // The landingpad instruction must be the first non-PHI instruction in the
3400  // block.
3401  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3402  "LandingPadInst not the first non-PHI instruction in the block.",
3403  &LPI);
3404 
3405  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3406  Constant *Clause = LPI.getClause(i);
3407  if (LPI.isCatch(i)) {
3408  Assert(isa<PointerType>(Clause->getType()),
3409  "Catch operand does not have pointer type!", &LPI);
3410  } else {
3411  Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3412  Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3413  "Filter operand is not an array of constants!", &LPI);
3414  }
3415  }
3416 
3417  visitInstruction(LPI);
3418 }
3419 
3420 void Verifier::visitResumeInst(ResumeInst &RI) {
3421  Assert(RI.getFunction()->hasPersonalityFn(),
3422  "ResumeInst needs to be in a function with a personality.", &RI);
3423 
3424  if (!LandingPadResultTy)
3425  LandingPadResultTy = RI.getValue()->getType();
3426  else
3427  Assert(LandingPadResultTy == RI.getValue()->getType(),
3428  "The resume instruction should have a consistent result type "
3429  "inside a function.",
3430  &RI);
3431 
3432  visitTerminatorInst(RI);
3433 }
3434 
3435 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3436  BasicBlock *BB = CPI.getParent();
3437 
3438  Function *F = BB->getParent();
3439  Assert(F->hasPersonalityFn(),
3440  "CatchPadInst needs to be in a function with a personality.", &CPI);
3441 
3442  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3443  "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3444  CPI.getParentPad());
3445 
3446  // The catchpad instruction must be the first non-PHI instruction in the
3447  // block.
3448  Assert(BB->getFirstNonPHI() == &CPI,
3449  "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3450 
3451  visitEHPadPredecessors(CPI);
3452  visitFuncletPadInst(CPI);
3453 }
3454 
3455 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3456  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3457  "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3458  CatchReturn.getOperand(0));
3459 
3460  visitTerminatorInst(CatchReturn);
3461 }
3462 
3463 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3464  BasicBlock *BB = CPI.getParent();
3465 
3466  Function *F = BB->getParent();
3467  Assert(F->hasPersonalityFn(),
3468  "CleanupPadInst needs to be in a function with a personality.", &CPI);
3469 
3470  // The cleanuppad instruction must be the first non-PHI instruction in the
3471  // block.
3472  Assert(BB->getFirstNonPHI() == &CPI,
3473  "CleanupPadInst not the first non-PHI instruction in the block.",
3474  &CPI);
3475 
3476  auto *ParentPad = CPI.getParentPad();
3477  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3478  "CleanupPadInst has an invalid parent.", &CPI);
3479 
3480  visitEHPadPredecessors(CPI);
3481  visitFuncletPadInst(CPI);
3482 }
3483 
3484 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3485  User *FirstUser = nullptr;
3486  Value *FirstUnwindPad = nullptr;
3487  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3488  SmallSet<FuncletPadInst *, 8> Seen;
3489 
3490  while (!Worklist.empty()) {
3491  FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3492  Assert(Seen.insert(CurrentPad).second,
3493  "FuncletPadInst must not be nested within itself", CurrentPad);
3494  Value *UnresolvedAncestorPad = nullptr;
3495  for (User *U : CurrentPad->users()) {
3496  BasicBlock *UnwindDest;
3497  if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3498  UnwindDest = CRI->getUnwindDest();
3499  } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3500  // We allow catchswitch unwind to caller to nest
3501  // within an outer pad that unwinds somewhere else,
3502  // because catchswitch doesn't have a nounwind variant.
3503  // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3504  if (CSI->unwindsToCaller())
3505  continue;
3506  UnwindDest = CSI->getUnwindDest();
3507  } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3508  UnwindDest = II->getUnwindDest();
3509  } else if (isa<CallInst>(U)) {
3510  // Calls which don't unwind may be found inside funclet
3511  // pads that unwind somewhere else. We don't *require*
3512  // such calls to be annotated nounwind.
3513  continue;
3514  } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3515  // The unwind dest for a cleanup can only be found by
3516  // recursive search. Add it to the worklist, and we'll
3517  // search for its first use that determines where it unwinds.
3518  Worklist.push_back(CPI);
3519  continue;
3520  } else {
3521  Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3522  continue;
3523  }
3524 
3525  Value *UnwindPad;
3526  bool ExitsFPI;
3527  if (UnwindDest) {
3528  UnwindPad = UnwindDest->getFirstNonPHI();
3529  if (!cast<Instruction>(UnwindPad)->isEHPad())
3530  continue;
3531  Value *UnwindParent = getParentPad(UnwindPad);
3532  // Ignore unwind edges that don't exit CurrentPad.
3533  if (UnwindParent == CurrentPad)
3534  continue;
3535  // Determine whether the original funclet pad is exited,
3536  // and if we are scanning nested pads determine how many
3537  // of them are exited so we can stop searching their
3538  // children.
3539  Value *ExitedPad = CurrentPad;
3540  ExitsFPI = false;
3541  do {
3542  if (ExitedPad == &FPI) {
3543  ExitsFPI = true;
3544  // Now we can resolve any ancestors of CurrentPad up to
3545  // FPI, but not including FPI since we need to make sure
3546  // to check all direct users of FPI for consistency.
3547  UnresolvedAncestorPad = &FPI;
3548  break;
3549  }
3550  Value *ExitedParent = getParentPad(ExitedPad);
3551  if (ExitedParent == UnwindParent) {
3552  // ExitedPad is the ancestor-most pad which this unwind
3553  // edge exits, so we can resolve up to it, meaning that
3554  // ExitedParent is the first ancestor still unresolved.
3555  UnresolvedAncestorPad = ExitedParent;
3556  break;
3557  }
3558  ExitedPad = ExitedParent;
3559  } while (!isa<ConstantTokenNone>(ExitedPad));
3560  } else {
3561  // Unwinding to caller exits all pads.
3562  UnwindPad = ConstantTokenNone::get(FPI.getContext());
3563  ExitsFPI = true;
3564  UnresolvedAncestorPad = &FPI;
3565  }
3566 
3567  if (ExitsFPI) {
3568  // This unwind edge exits FPI. Make sure it agrees with other
3569  // such edges.
3570  if (FirstUser) {
3571  Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3572  "pad must have the same unwind "
3573  "dest",
3574  &FPI, U, FirstUser);
3575  } else {
3576  FirstUser = U;
3577  FirstUnwindPad = UnwindPad;
3578  // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3579  if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3580  getParentPad(UnwindPad) == getParentPad(&FPI))
3581  SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U);
3582  }
3583  }
3584  // Make sure we visit all uses of FPI, but for nested pads stop as
3585  // soon as we know where they unwind to.
3586  if (CurrentPad != &FPI)
3587  break;
3588  }
3589  if (UnresolvedAncestorPad) {
3590  if (CurrentPad == UnresolvedAncestorPad) {
3591  // When CurrentPad is FPI itself, we don't mark it as resolved even if
3592  // we've found an unwind edge that exits it, because we need to verify
3593  // all direct uses of FPI.
3594  assert(CurrentPad == &FPI);
3595  continue;
3596  }
3597  // Pop off the worklist any nested pads that we've found an unwind
3598  // destination for. The pads on the worklist are the uncles,
3599  // great-uncles, etc. of CurrentPad. We've found an unwind destination
3600  // for all ancestors of CurrentPad up to but not including
3601  // UnresolvedAncestorPad.
3602  Value *ResolvedPad = CurrentPad;
3603  while (!Worklist.empty()) {
3604  Value *UnclePad = Worklist.back();
3605  Value *AncestorPad = getParentPad(UnclePad);
3606  // Walk ResolvedPad up the ancestor list until we either find the
3607  // uncle's parent or the last resolved ancestor.
3608  while (ResolvedPad != AncestorPad) {
3609  Value *ResolvedParent = getParentPad(ResolvedPad);
3610  if (ResolvedParent == UnresolvedAncestorPad) {
3611  break;
3612  }
3613  ResolvedPad = ResolvedParent;
3614  }
3615  // If the resolved ancestor search didn't find the uncle's parent,
3616  // then the uncle is not yet resolved.
3617  if (ResolvedPad != AncestorPad)
3618  break;
3619  // This uncle is resolved, so pop it from the worklist.
3620  Worklist.pop_back();
3621  }
3622  }
3623  }
3624 
3625  if (FirstUnwindPad) {
3626  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3627  BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3628  Value *SwitchUnwindPad;
3629  if (SwitchUnwindDest)
3630  SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3631  else
3632  SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3633  Assert(SwitchUnwindPad == FirstUnwindPad,
3634  "Unwind edges out of a catch must have the same unwind dest as "
3635  "the parent catchswitch",
3636  &FPI, FirstUser, CatchSwitch);
3637  }
3638  }
3639 
3640  visitInstruction(FPI);
3641 }
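// Summary of the traversal above: starting from FPI, the worklist follows nested
// cleanup pads until each one's unwind destination is known, requires every
// unwind edge that leaves FPI to target the same pad (or unwind to caller),
// and records sibling unwind information for verifySiblingFuncletUnwinds.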
3642 
3643 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3644  BasicBlock *BB = CatchSwitch.getParent();
3645 
3646  Function *F = BB->getParent();
3647  Assert(F->hasPersonalityFn(),
3648  "CatchSwitchInst needs to be in a function with a personality.",
3649  &CatchSwitch);
3650 
3651  // The catchswitch instruction must be the first non-PHI instruction in the
3652  // block.
3653  Assert(BB->getFirstNonPHI() == &CatchSwitch,
3654  "CatchSwitchInst not the first non-PHI instruction in the block.",
3655  &CatchSwitch);
3656 
3657  auto *ParentPad = CatchSwitch.getParentPad();
3658  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3659  "CatchSwitchInst has an invalid parent.", ParentPad);
3660 
3661  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3662  Instruction *I = UnwindDest->getFirstNonPHI();
3663  Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3664  "CatchSwitchInst must unwind to an EH block which is not a "
3665  "landingpad.",
3666  &CatchSwitch);
3667 
3668  // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3669  if (getParentPad(I) == ParentPad)
3670  SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3671  }
3672 
3673  Assert(CatchSwitch.getNumHandlers() != 0,
3674  "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3675 
3676  for (BasicBlock *Handler : CatchSwitch.handlers()) {
3677  Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3678  "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3679  }
3680 
3681  visitEHPadPredecessors(CatchSwitch);
3682  visitTerminatorInst(CatchSwitch);
3683 }
3684 
3685 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3686  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3687  "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3688  CRI.getOperand(0));
3689 
3690  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
3691  Instruction *I = UnwindDest->getFirstNonPHI();
3692  Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3693  "CleanupReturnInst must unwind to an EH block which is not a "
3694  "landingpad.",
3695  &CRI);
3696  }
3697 
3698  visitTerminatorInst(CRI);
3699 }
3700 
3701 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
3702  Instruction *Op = cast<Instruction>(I.getOperand(i));
3703  // If we have an invalid invoke, don't try to compute the dominance.
3704  // We already reject it in the invoke specific checks and the dominance
3705  // computation doesn't handle multiple edges.
3706  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
3707  if (II->getNormalDest() == II->getUnwindDest())
3708  return;
3709  }
3710 
3711  // Quick check whether the def has already been encountered in the same block.
3712  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
3713  // uses are defined to happen on the incoming edge, not at the instruction.
3714  //
3715  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
3716  // wrapping an SSA value, assert that we've already encountered it. See
3717  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
3718  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
3719  return;
3720 
3721  const Use &U = I.getOperandUse(i);
3722  Assert(DT.dominates(Op, U),
3723  "Instruction does not dominate all uses!", Op, &I);
3724 }
3725 
3726 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
3727  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
3728  "apply only to pointer types", &I);
3729  Assert(isa<LoadInst>(I),
3730  "dereferenceable, dereferenceable_or_null apply only to load"
3731  " instructions, use attributes for calls or invokes", &I);
3732  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
3733  "take one operand!", &I);
3734  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
3735  Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
3736  "dereferenceable_or_null metadata value must be an i64!", &I);
3737 }
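// Illustrative example (not part of the original source): the checks above accept
//   %v = load i32*, i32** %p, !dereferenceable !0
//   !0 = !{i64 8}
// and reject the same attachment on a non-pointer-typed load or on a call site,
// where the dereferenceable attribute must be used instead.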
3738 
3739 /// verifyInstruction - Verify that an instruction is well formed.
3740 ///
3741 void Verifier::visitInstruction(Instruction &I) {
3742  BasicBlock *BB = I.getParent();
3743  Assert(BB, "Instruction not embedded in basic block!", &I);
3744 
3745  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
3746  for (User *U : I.users()) {
3747  Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
3748  "Only PHI nodes may reference their own value!", &I);
3749  }
3750  }
3751 
3752  // Check that void typed values don't have names
3753  Assert(!I.getType()->isVoidTy() || !I.hasName(),
3754  "Instruction has a name, but provides a void value!", &I);
3755 
3756  // Check that the return value of the instruction is either void or a legal
3757  // value type.
3758  Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
3759  "Instruction returns a non-scalar type!", &I);
3760 
3761  // Check that the instruction doesn't produce metadata. Calls are already
3762  // checked against the callee type.
3763  Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
3764  "Invalid use of metadata!", &I);
3765 
3766  // Check that all uses of the instruction, if they are instructions
3767  // themselves, actually have parent basic blocks. If the use is not an
3768  // instruction, it is an error!
3769  for (Use &U : I.uses()) {
3770  if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
3771  Assert(Used->getParent() != nullptr,
3772  "Instruction referencing"
3773  " instruction not embedded in a basic block!",
3774  &I, Used);
3775  else {
3776  CheckFailed("Use of instruction is not an instruction!", U);
3777  return;
3778  }
3779  }
3780 
3781  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
3782  Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
3783 
3784  // Check to make sure that only first-class-values are operands to
3785  // instructions.
3786  if (!I.getOperand(i)->getType()->isFirstClassType()) {
3787  Assert(false, "Instruction operands must be first-class values!", &I);
3788  }
3789 
3790  if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
3791  // Check to make sure that the "address of" an intrinsic function is never
3792  // taken.
3793  Assert(
3794  !F->isIntrinsic() ||
3795  i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? e - 3 : 0),
3796  "Cannot take the address of an intrinsic!", &I);
3797  Assert(
3798  !F->isIntrinsic() || isa<CallInst>(I) ||
3799  F->getIntrinsicID() == Intrinsic::donothing ||
3800  F->getIntrinsicID() == Intrinsic::coro_resume ||
3801  F->getIntrinsicID() == Intrinsic::coro_destroy ||
3802  F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
3803  F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
3804  F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint,
3805  "Cannot invoke an intrinsic other than donothing, patchpoint, "
3806  "statepoint, coro_resume or coro_destroy",
3807  &I);
3808  Assert(F->getParent() == &M, "Referencing function in another module!",
3809  &I, &M, F, F->getParent());
3810  } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
3811  Assert(OpBB->getParent() == BB->getParent(),
3812  "Referring to a basic block in another function!", &I);
3813  } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
3814  Assert(OpArg->getParent() == BB->getParent(),
3815  "Referring to an argument in another function!", &I);
3816  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
3817  Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
3818  &M, GV, GV->getParent());
3819  } else if (isa<Instruction>(I.getOperand(i))) {
3820  verifyDominatesUse(I, i);
3821  } else if (isa<InlineAsm>(I.getOperand(i))) {
3822  Assert((i + 1 == e && isa<CallInst>(I)) ||
3823  (i + 3 == e && isa<InvokeInst>(I)),
3824  "Cannot take the address of an inline asm!", &I);
3825  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
3826  if (CE->getType()->isPtrOrPtrVectorTy() ||
3827  !DL.getNonIntegralAddressSpaces().empty()) {
3828  // If we have a ConstantExpr pointer, we need to see if it came from an
3829  // illegal bitcast. If the datalayout string specifies non-integral
3830  // address spaces then we also need to check for illegal ptrtoint and
3831  // inttoptr expressions.
3832  visitConstantExprsRecursively(CE);
3833  }
3834  }
3835  }
3836 
3837  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
3838  Assert(I.getType()->isFPOrFPVectorTy(),
3839  "fpmath requires a floating point result!", &I);
3840  Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
3841  if (ConstantFP *CFP0 =
3842  mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
3843  const APFloat &Accuracy = CFP0->getValueAPF();
3844  Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
3845  "fpmath accuracy must have float type", &I);
3846  Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
3847  "fpmath accuracy not a positive number!", &I);
3848  } else {
3849  Assert(false, "invalid fpmath accuracy!", &I);
3850  }
3851  }
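// Illustrative example (not part of the original source): a valid !fpmath
// attachment carries a single positive float accuracy operand, e.g.
//   %q = fdiv float %a, %b, !fpmath !0
//   !0 = !{float 2.5}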
3852 
3853  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
3854  Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
3855  "Ranges are only for loads, calls and invokes!", &I);
3856  visitRangeMetadata(I, Range, I.getType());
3857  }
3858 
3859  if (I.getMetadata(LLVMContext::MD_nonnull)) {
3860  Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
3861  &I);
3862  Assert(isa<LoadInst>(I),
3863  "nonnull applies only to load instructions, use attributes"
3864  " for calls or invokes",
3865  &I);
3866  }
3867 
3868  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
3869  visitDereferenceableMetadata(I, MD);
3870 
3871  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
3872  visitDereferenceableMetadata(I, MD);
3873 
3874  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
3875  TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
3876 
3877  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
3878  Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
3879  &I);
3880  Assert(isa<LoadInst>(I), "align applies only to load instructions, "
3881  "use attributes for calls or invokes", &I);
3882  Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
3883  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
3884  Assert(CI && CI->getType()->isIntegerTy(64),
3885  "align metadata value must be an i64!", &I);
3886  uint64_t Align = CI->getZExtValue();
3887  Assert(isPowerOf2_64(Align),
3888  "align metadata value must be a power of 2!", &I);
3889  Assert(Align <= Value::MaximumAlignment,
3890  "alignment is larger than implementation defined limit", &I);
3891  }
3892 
3893  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
3894  AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
3895  visitMDNode(*N);
3896  }
3897 
3898  if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I))
3899  verifyFragmentExpression(*DII);
3900 
3901  InstsInThisBlock.insert(&I);
3902 }
3903 
3904 /// Allow intrinsics to be verified in different ways.
3905 void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
3906  Function *IF = CS.getCalledFunction();
3907  Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
3908  IF);
3909 
3910  // Verify that the intrinsic prototype lines up with what the .td files
3911  // describe.
3912  FunctionType *IFTy = IF->getFunctionType();
3913  bool IsVarArg = IFTy->isVarArg();
3914 
3915  SmallVector<Intrinsic::IITDescriptor, 8> Table;
3916  getIntrinsicInfoTableEntries(ID, Table);
3917  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
3918 
3919  SmallVector<Type *, 4> ArgTys;
3920  Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(),
3921  TableRef, ArgTys),
3922  "Intrinsic has incorrect return type!", IF);
3923  for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
3924  Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i),
3925  TableRef, ArgTys),
3926  "Intrinsic has incorrect argument type!", IF);
3927 
3928  // Verify if the intrinsic call matches the vararg property.
3929  if (IsVarArg)
3930  Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
3931  "Intrinsic was not defined with variable arguments!", IF);
3932  else
3933  Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
3934  "Callsite was not defined with variable arguments!", IF);
3935 
3936  // All descriptors should be absorbed by now.
3937  Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
3938 
3939  // Now that we have the intrinsic ID and the actual argument types (and we
3940  // know they are legal for the intrinsic!) get the intrinsic name through the
3941  // usual means. This allows us to verify the mangling of argument types into
3942  // the name.
3943  const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
3944  Assert(ExpectedName == IF->getName(),
3945  "Intrinsic name not mangled correctly for type arguments! "
3946  "Should be: " +
3947  ExpectedName,
3948  IF);
3949 
3950  // If the intrinsic takes MDNode arguments, verify that they are either global
3951  // or are local to *this* function.
3952  for (Value *V : CS.args())
3953  if (auto *MD = dyn_cast<MetadataAsValue>(V))
3954  visitMetadataAsValue(*MD, CS.getCaller());
3955 
3956  switch (ID) {
3957  default:
3958  break;
3959  case Intrinsic::coro_id: {
3960  auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
3961  if (isa<ConstantPointerNull>(InfoArg))
3962  break;
3963  auto *GV = dyn_cast<GlobalVariable>(InfoArg);
3964  Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
3965  "info argument of llvm.coro.begin must refer to an initialized "
3966  "constant");
3967  Constant *Init = GV->getInitializer();
3968  Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
3969  "info argument of llvm.coro.begin must refer to either a struct or "
3970  "an array");
3971  break;
3972  }
3973  case Intrinsic::ctlz: // llvm.ctlz
3974  case Intrinsic::cttz: // llvm.cttz
3975  Assert(isa<ConstantInt>(CS.getArgOperand(1)),
3976  "is_zero_undef argument of bit counting intrinsics must be a "
3977  "constant int",
3978  CS);
3979  break;
3980  case Intrinsic::experimental_constrained_fadd:
3981  case Intrinsic::experimental_constrained_fsub:
3982  case Intrinsic::experimental_constrained_fmul:
3983  case Intrinsic::experimental_constrained_fdiv:
3984  case Intrinsic::experimental_constrained_frem:
3985  case Intrinsic::experimental_constrained_fma:
3986  case Intrinsic::experimental_constrained_sqrt:
3987  case Intrinsic::experimental_constrained_pow:
3988  case Intrinsic::experimental_constrained_powi:
3989  case Intrinsic::experimental_constrained_sin:
3990  case Intrinsic::experimental_constrained_cos:
3991  case Intrinsic::experimental_constrained_exp:
3992  case Intrinsic::experimental_constrained_exp2:
3993  case Intrinsic::experimental_constrained_log:
3994  case Intrinsic::experimental_constrained_log10:
3995  case Intrinsic::experimental_constrained_log2:
3996  case Intrinsic::experimental_constrained_rint:
3997  case Intrinsic::experimental_constrained_nearbyint:
3998  visitConstrainedFPIntrinsic(
3999  cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
4000  break;
4001  case Intrinsic::dbg_declare: // llvm.dbg.declare
4002  Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
4003  "invalid llvm.dbg.declare intrinsic call 1", CS);
4004  visitDbgIntrinsic("declare", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
4005  break;
4006  case Intrinsic::dbg_addr: // llvm.dbg.addr
4007  visitDbgIntrinsic("addr", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
4008  break;
4009  case Intrinsic::dbg_value: // llvm.dbg.value
4010  visitDbgIntrinsic("value", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
4011  break;
4012  case Intrinsic::memcpy:
4013  case Intrinsic::memmove:
4014  case Intrinsic::memset: {
4015  ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3));
4016  Assert(AlignCI,
4017  "alignment argument of memory intrinsics must be a constant int",
4018  CS);
4019  const APInt &AlignVal = AlignCI->getValue();
4020  Assert(AlignCI->isZero() || AlignVal.isPowerOf2(),
4021  "alignment argument of memory intrinsics must be a power of 2", CS);
4022  Assert(isa<ConstantInt>(CS.getArgOperand(4)),
4023  "isvolatile argument of memory intrinsics must be a constant int",
4024  CS);
4025  break;
4026  }
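// Illustrative example (not part of the original source): for this form of the
// memory intrinsics the alignment and isvolatile operands must be constants, e.g.
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 4, i1 false)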
4027  case Intrinsic::memcpy_element_unordered_atomic: {
4028  auto *MI =
4029  cast<ElementUnorderedAtomicMemCpyInst>(CS.getInstruction());
4031 
4032  ConstantInt *ElementSizeCI =
4033  dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
4034  Assert(ElementSizeCI,
4035  "element size of the element-wise unordered atomic memory "
4036  "intrinsic must be a constant int",
4037  CS);
4038  const APInt &ElementSizeVal = ElementSizeCI->getValue();
4039  Assert(ElementSizeVal.isPowerOf2(),
4040  "element size of the element-wise atomic memory intrinsic "
4041  "must be a power of 2",
4042  CS);
4043 
4044  if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
4045  uint64_t Length = LengthCI->getZExtValue();
4046  uint64_t ElementSize = MI->getElementSizeInBytes();
4047  Assert((Length % ElementSize) == 0,
4048  "constant length must be a multiple of the element size in the "
4049  "element-wise atomic memory intrinsic",
4050  CS);
4051  }
4052 
4053  auto IsValidAlignment = [&](uint64_t Alignment) {
4054  return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4055  };
4056  uint64_t DstAlignment = CS.getParamAlignment(0),
4057  SrcAlignment = CS.getParamAlignment(1);
4058  Assert(IsValidAlignment(DstAlignment),
4059  "incorrect alignment of the destination argument", CS);
4060  Assert(IsValidAlignment(SrcAlignment),
4061  "incorrect alignment of the source argument", CS);
4062  break;
4063  }
4064  case Intrinsic::memmove_element_unordered_atomic: {
4065  auto *MI = cast<ElementUnorderedAtomicMemMoveInst>(CS.getInstruction());
4066 
4067  ConstantInt *ElementSizeCI =
4068  dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
4069  Assert(ElementSizeCI,
4070  "element size of the element-wise unordered atomic memory "
4071  "intrinsic must be a constant int",
4072  CS);
4073  const APInt &ElementSizeVal = ElementSizeCI->getValue();
4074  Assert(ElementSizeVal.isPowerOf2(),
4075  "element size of the element-wise atomic memory intrinsic "
4076  "must be a power of 2",
4077  CS);
4078 
4079  if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
4080  uint64_t Length = LengthCI->getZExtValue();
4081  uint64_t ElementSize = MI->getElementSizeInBytes();
4082  Assert((Length % ElementSize) == 0,
4083  "constant length must be a multiple of the element size in the "
4084  "element-wise atomic memory intrinsic",
4085  CS);
4086  }
4087 
4088  auto IsValidAlignment = [&](uint64_t Alignment) {
4089  return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4090  };
4091  uint64_t DstAlignment = CS.getParamAlignment(0),
4092  SrcAlignment = CS.getParamAlignment(1);
4093  Assert(IsValidAlignment(DstAlignment),
4094  "incorrect alignment of the destination argument", CS);
4095  Assert(IsValidAlignment(SrcAlignment),
4096  "incorrect alignment of the source argument", CS);
4097  break;
4098  }
4099  case Intrinsic::memset_element_unordered_atomic: {
4100  auto *MI = cast<ElementUnorderedAtomicMemSetInst>(CS.getInstruction());
4101 
4102  ConstantInt *ElementSizeCI =
4103  dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
4104  Assert(ElementSizeCI,
4105  "element size of the element-wise unordered atomic memory "
4106  "intrinsic must be a constant int",
4107  CS);
4108  const APInt &ElementSizeVal = ElementSizeCI->getValue();
4109  Assert(ElementSizeVal.isPowerOf2(),
4110  "element size of the element-wise atomic memory intrinsic "
4111  "must be a power of 2",
4112  CS);
4113 
4114  if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
4115  uint64_t Length = LengthCI->getZExtValue();
4116  uint64_t ElementSize = MI->getElementSizeInBytes();
4117  Assert((Length % ElementSize) == 0,
4118  "constant length must be a multiple of the element size in the "
4119  "element-wise atomic memory intrinsic",
4120  CS);
4121  }
4122 
4123  auto IsValidAlignment = [&](uint64_t Alignment) {
4124  return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4125  };
4126  uint64_t DstAlignment = CS.getParamAlignment(0);
4127  Assert(IsValidAlignment(DstAlignment),
4128  "incorrect alignment of the destination argument", CS);
4129  break;
4130  }
4131  case Intrinsic::gcroot:
4132  case Intrinsic::gcwrite:
4133  case Intrinsic::gcread:
4134  if (ID == Intrinsic::gcroot) {
4135  AllocaInst *AI =
4136  dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
4137  Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
4138  Assert(isa<Constant>(CS.getArgOperand(1)),
4139  "llvm.gcroot parameter #2 must be a constant.", CS);
4140  if (!AI->getAllocatedType()->isPointerTy()) {
4141  Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
4142  "llvm.gcroot parameter #1 must either be a pointer alloca, "
4143  "or argument #2 must be a non-null constant.",
4144  CS);
4145  }
4146  }
4147 
4148  Assert(CS.getParent()->getParent()->hasGC(),
4149  "Enclosing function does not use GC.", CS);
4150  break;
4151  case Intrinsic::init_trampoline:
4152  Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
4153  "llvm.init_trampoline parameter #2 must resolve to a function.",
4154  CS);
4155  break;
4156  case Intrinsic::prefetch:
4157  Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
4158  isa<ConstantInt>(CS.getArgOperand(2)) &&
4159  cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
4160  cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
4161  "invalid arguments to llvm.prefetch", CS);
4162  break;
4163  case Intrinsic::stackprotector:
4164  Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
4165  "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
4166  break;
4167  case Intrinsic::lifetime_start:
4168  case Intrinsic::lifetime_end:
4169  case Intrinsic::invariant_start:
4170  Assert(isa<ConstantInt>(CS.getArgOperand(0)),
4171  "size argument of memory use markers must be a constant integer",
4172  CS);
4173  break;
4174  case Intrinsic::invariant_end:
4175  Assert(isa<ConstantInt>(CS.getArgOperand(1)),
4176  "llvm.invariant.end parameter #2 must be a constant integer", CS);
4177  break;
4178 
4179  case Intrinsic::localescape: {
4180  BasicBlock *BB = CS.getParent();
4181  Assert(BB == &BB->getParent()->front(),
4182  "llvm.localescape used outside of entry block", CS);
4183  Assert(!SawFrameEscape,
4184  "multiple calls to llvm.localescape in one function", CS);
4185  for (Value *Arg : CS.args()) {
4186  if (isa<ConstantPointerNull>(Arg))
4187  continue; // Null values are allowed as placeholders.
4188  auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4189  Assert(AI && AI->isStaticAlloca(),
4190  "llvm.localescape only accepts static allocas", CS);
4191  }
4192  FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
4193  SawFrameEscape = true;
4194  break;
4195  }
4196  case Intrinsic::localrecover: {
4197  Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
4198  Function *Fn = dyn_cast<Function>(FnArg);
4199  Assert(Fn && !Fn->isDeclaration(),
4200  "llvm.localrecover first "
4201  "argument must be function defined in this module",
4202  CS);
4203  auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
4204  Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
4205  CS);
4206  auto &Entry = FrameEscapeInfo[Fn];
4207  Entry.second = unsigned(
4208  std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4209  break;
4210  }
4211 
4212  case Intrinsic::experimental_gc_statepoint:
4213  Assert(!CS.isInlineAsm(),
4214  "gc.statepoint support for inline assembly unimplemented", CS);
4215  Assert(CS.getParent()->getParent()->hasGC(),
4216  "Enclosing function does not use GC.", CS);
4217 
4218  verifyStatepoint(CS);
4219  break;
4220  case Intrinsic::experimental_gc_result: {
4221  Assert(CS.getParent()->getParent()->hasGC(),
4222  "Enclosing function does not use GC.", CS);
4223  // Are we tied to a statepoint properly?
4224  CallSite StatepointCS(CS.getArgOperand(0));
4225  const Function *StatepointFn =
4226  StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
4227  Assert(StatepointFn && StatepointFn->isDeclaration() &&
4228  StatepointFn->getIntrinsicID() ==
4229  Intrinsic::experimental_gc_statepoint,
4230  "gc.result operand #1 must be from a statepoint", CS,
4231  CS.getArgOperand(0));
4232 
4233  // Assert that result type matches wrapped callee.
4234  const Value *Target = StatepointCS.getArgument(2);
4235  auto *PT = cast<PointerType>(Target->getType());
4236  auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4237  Assert(CS.getType() == TargetFuncType->getReturnType(),
4238  "gc.result result type does not match wrapped callee", CS);
4239  break;
4240  }
4241  case Intrinsic::experimental_gc_relocate: {
4242  Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
4243 
4244  Assert(isa<PointerType>(CS.getType()->getScalarType()),
4245  "gc.relocate must return a pointer or a vector of pointers", CS);
4246 
4247  // Check that this relocate is correctly tied to the statepoint
4248 
4249  // This is case for relocate on the unwinding path of an invoke statepoint
4250  if (LandingPadInst *LandingPad =
4251  dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
4252 
4253  const BasicBlock *InvokeBB =
4254  LandingPad->getParent()->getUniquePredecessor();
4255 
4256  // Landingpad relocates should have only one predecessor with invoke
4257  // statepoint terminator
4258  Assert(InvokeBB, "safepoints should have unique landingpads",
4259  LandingPad->getParent());
4260  Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4261  InvokeBB);
4262  Assert(isStatepoint(InvokeBB->getTerminator()),
4263  "gc relocate should be linked to a statepoint", InvokeBB);
4264  }
4265  else {
4266  // In all other cases relocate should be tied to the statepoint directly.
4267  // This covers relocates on a normal return path of invoke statepoint and
4268  // relocates of a call statepoint.
4269  auto Token = CS.getArgOperand(0);
4270  Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4271  "gc relocate is incorrectly tied to the statepoint", CS, Token);
4272  }
4273 
4274  // Verify rest of the relocate arguments.
4275 
4276  ImmutableCallSite StatepointCS(
4277  cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
4278 
4279  // Both the base and derived must be piped through the safepoint.
4280  Value* Base = CS.getArgOperand(1);
4281  Assert(isa<ConstantInt>(Base),
4282  "gc.relocate operand #2 must be integer offset", CS);
4283 
4284  Value* Derived = CS.getArgOperand(2);
4285  Assert(isa<ConstantInt>(Derived),
4286  "gc.relocate operand #3 must be integer offset", CS);
4287 
4288  const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4289  const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4290  // Check the bounds
4291  Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
4292  "gc.relocate: statepoint base index out of bounds", CS);
4293  Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
4294  "gc.relocate: statepoint derived index out of bounds", CS);
4295 
4296  // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4297  // section of the statepoint's argument.
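// For reference, a gc.statepoint call is laid out as: id, number of patch bytes,
// call target, number of call arguments, flags, the call arguments, a transition
// argument count plus its arguments, a deoptimization argument count plus its
// arguments, and finally the gc parameters that the indices must point into.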
4298  Assert(StatepointCS.arg_size() > 0,
4299  "gc.statepoint: insufficient arguments");
4300  Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
4301  "gc.statepoint: number of call arguments must be constant integer");
4302  const unsigned NumCallArgs =
4303  cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
4304  Assert(StatepointCS.arg_size() > NumCallArgs + 5,
4305  "gc.statepoint: mismatch in number of call arguments");
4306  Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
4307  "gc.statepoint: number of transition arguments must be "
4308  "a constant integer");
4309  const int NumTransitionArgs =
4310  cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
4311  ->getZExtValue();
4312  const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4313  Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
4314  "gc.statepoint: number of deoptimization arguments must be "
4315  "a constant integer");
4316  const int NumDeoptArgs =
4317  cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
4318  ->getZExtValue();
4319  const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4320  const int GCParamArgsEnd = StatepointCS.arg_size();
4321  Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4322  "gc.relocate: statepoint base index doesn't fall within the "
4323  "'gc parameters' section of the statepoint call",
4324  CS);
4325  Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4326  "gc.relocate: statepoint derived index doesn't fall within the "
4327  "'gc parameters' section of the statepoint call",
4328  CS);
4329 
4330  // Relocated value must be either a pointer type or vector-of-pointer type,
4331  // but gc_relocate does not need to return the same pointer type as the
4332  // relocated pointer. It can be casted to the correct type later if it's
4333  // desired. However, they must have the same address space and 'vectorness'
4334  GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
4335  Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4336  "gc.relocate: relocated value must be a gc pointer", CS);
4337 
4338  auto ResultType = CS.getType();
4339  auto DerivedType = Relocate.getDerivedPtr()->getType();
4340  Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4341  "gc.relocate: vector relocates to vector and pointer to pointer",
4342  CS);
4343  Assert(
4344  ResultType->getPointerAddressSpace() ==
4345  DerivedType->getPointerAddressSpace(),
4346  "gc.relocate: relocating a pointer shouldn't change its address space",
4347  CS);
4348  break;
4349  }
4350  case Intrinsic::eh_exceptioncode:
4351  case Intrinsic::eh_exceptionpointer: {
4352  Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
4353  "eh.exceptionpointer argument must be a catchpad", CS);
4354  break;
4355  }
4356  case Intrinsic::masked_load: {
4357  Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
4358 
4359  Value *Ptr = CS.getArgOperand(0);
4360  //Value *Alignment = CS.getArgOperand(1);
4361  Value *Mask = CS.getArgOperand(2);
4362  Value *PassThru = CS.getArgOperand(3);
4363  Assert(Mask->getType()->isVectorTy(),
4364  "masked_load: mask must be vector", CS);
4365 
4366  // DataTy is the overloaded type
4367  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4368  Assert(DataTy == CS.getType(),
4369  "masked_load: return must match pointer type", CS);
4370  Assert(PassThru->getType() == DataTy,
4371  "masked_load: pass through and data type must match", CS);
4372  Assert(Mask->getType()->getVectorNumElements() ==
4373  DataTy->getVectorNumElements(),
4374  "masked_load: vector mask must be same length as data", CS);
4375  break;
4376  }
4377  case Intrinsic::masked_store: {
4378  Value *Val = CS.getArgOperand(0);
4379  Value *Ptr = CS.getArgOperand(1);
4380  //Value *Alignment = CS.getArgOperand(2);
4381  Value *Mask = CS.getArgOperand(3);
4382  Assert(Mask->getType()->isVectorTy(),
4383  "masked_store: mask must be vector", CS);
4384 
4385  // DataTy is the overloaded type
4386  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4387  Assert(DataTy == Val->getType(),
4388  "masked_store: storee must match pointer type", CS);
4389  Assert(Mask->getType()->getVectorNumElements() ==
4390  DataTy->getVectorNumElements(),
4391  "masked_store: vector mask must be same length as data", CS);
4392  break;
4393  }
4394 
4395  case Intrinsic::experimental_guard: {
4396  Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
4397  Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4398  "experimental_guard must have exactly one "
4399  "\"deopt\" operand bundle");
4400  break;
4401  }
4402 
4403  case Intrinsic::experimental_deoptimize: {
4404  Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
4405  Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4406  "experimental_deoptimize must have exactly one "
4407  "\"deopt\" operand bundle");
4408  Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
4409  "experimental_deoptimize return type must match caller return type");
4410 
4411  if (CS.isCall()) {
4412  auto *DeoptCI = CS.getInstruction();
4413  auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
4414  Assert(RI,
4415  "calls to experimental_deoptimize must be followed by a return");
4416 
4417  if (!CS.getType()->isVoidTy() && RI)
4418  Assert(RI->getReturnValue() == DeoptCI,
4419  "calls to experimental_deoptimize must be followed by a return "
4420  "of the value computed by experimental_deoptimize");
4421  }
4422 
4423  break;
4424  }
4425  };
4426 }
4427 
4428 /// \brief Carefully grab the subprogram from a local scope.
4429 ///
4430 /// This carefully grabs the subprogram from a local scope, avoiding the
4431 /// built-in assertions that would typically fire.
4432 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4433  if (!LocalScope)
4434  return nullptr;
4435 
4436  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4437  return SP;
4438 
4439  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4440  return getSubprogram(LB->getRawScope());
4441 
4442  // Just return null; broken scope chains are checked elsewhere.
4443  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4444  return nullptr;
4445 }
4446 
4447 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4448  unsigned NumOperands = FPI.getNumArgOperands();
4449  Assert(((NumOperands == 5 && FPI.isTernaryOp()) ||
4450  (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
4451  "invalid arguments for constrained FP intrinsic", &FPI);
4452  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
4453  "invalid exception behavior argument", &FPI);
4454  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
4455  "invalid rounding mode argument", &FPI);
4457  "invalid rounding mode argument", &FPI);
4459  "invalid exception behavior argument", &FPI);
4460 }
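// For illustration, a binary constrained intrinsic has four operands: the two
// FP values followed by the rounding-mode and exception-behavior metadata:
//   %s = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")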
4461 
4462 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII) {
4463  auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
4464  AssertDI(isa<ValueAsMetadata>(MD) ||
4465  (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
4466  "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
4467  AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
4468  "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
4469  DII.getRawVariable());
4470  AssertDI(isa<DIExpression>(DII.getRawExpression()),
4471  "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
4472  DII.getRawExpression());
4473 
4474  // Ignore broken !dbg attachments; they're checked elsewhere.
4475  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
4476  if (!isa<DILocation>(N))
4477  return;
4478 
4479  BasicBlock *BB = DII.getParent();
4480  Function *F = BB ? BB->getParent() : nullptr;
4481 
4482  // The scopes for variables and !dbg attachments must agree.
4483  DILocalVariable *Var = DII.getVariable();
4484  DILocation *Loc = DII.getDebugLoc();
4485  Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4486  &DII, BB, F);
4487 
4488  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
4489  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4490  if (!VarSP || !LocSP)
4491  return; // Broken scope chains are checked elsewhere.
4492 
4493  AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4494  " variable and !dbg attachment",
4495  &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
4496  Loc->getScope()->getSubprogram());
4497 
4498  verifyFnArgs(DII);
4499 }
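// For illustration (metadata numbers are hypothetical), a dbg.value whose
// variable scope and !dbg location resolve to the same DISubprogram, as the
// check above requires:
//   call void @llvm.dbg.value(metadata i32 %x, metadata !10, metadata !DIExpression()), !dbg !11
//   !9  = distinct !DISubprogram(name: "f", ...)
//   !10 = !DILocalVariable(name: "x", scope: !9, ...)
//   !11 = !DILocation(line: 5, scope: !9)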
4500 
4501 static uint64_t getVariableSize(const DIVariable &V) {
4502  // Be careful of broken types (checked elsewhere).
4503  const Metadata *RawType = V.getRawType();
4504  while (RawType) {
4505  // Try to get the size directly.
4506  if (auto *T = dyn_cast<DIType>(RawType))
4507  if (uint64_t Size = T->getSizeInBits())
4508  return Size;
4509 
4510  if (auto *DT = dyn_cast<DIDerivedType>(RawType)) {
4511  // Look at the base type.
4512  RawType = DT->getRawBaseType();
4513  continue;
4514  }
4515 
4516  // Missing type or size.
4517  break;
4518  }
4519 
4520  // Fail gracefully.
4521  return 0;
4522 }
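// For illustration (hypothetical metadata), the size query walks derived types
// down to a sized base type; for a typedef of int it resolves to 32 bits:
//   !5 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
//   !6 = !DIDerivedType(tag: DW_TAG_typedef, name: "my_int", baseType: !5)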
4523 
4524 void Verifier::verifyFragmentExpression(const DbgInfoIntrinsic &I) {
4525  DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4526  DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4527 
4528  // We don't know whether this intrinsic verified correctly.
4529  if (!V || !E || !E->isValid())
4530  return;
4531 
4532  // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4533  auto Fragment = E->getFragmentInfo();
4534  if (!Fragment)
4535  return;
4536 
4537  // The frontend helps out GDB by emitting the members of local anonymous
4538  // unions as artificial local variables with shared storage. When SROA splits
4539  // the storage for artificial local variables that are smaller than the entire
4540  // union, the overhang piece will be outside of the allotted space for the
4541  // variable and this check fails.
4542  // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4543  if (V->isArtificial())
4544  return;
4545 
4546  verifyFragmentExpression(*V, *Fragment, &I);
4547 }
4548 
4549 template <typename ValueOrMetadata>
4550 void Verifier::verifyFragmentExpression(const DIVariable &V,
4551  DIExpression::FragmentInfo Fragment,
4552  ValueOrMetadata *Desc) {
4553  // If there's no size, the type is broken, but that should be checked
4554  // elsewhere.
4555  uint64_t VarSize = getVariableSize(V);
4556  if (!VarSize)
4557  return;
4558 
4559  unsigned FragSize = Fragment.SizeInBits;
4560  unsigned FragOffset = Fragment.OffsetInBits;
4561  AssertDI(FragSize + FragOffset <= VarSize,
4562  "fragment is larger than or outside of variable", Desc, &V);
4563  AssertDI(FragSize != VarSize, "fragment covers entire variable", Desc, &V);
4564 }
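// For illustration, with a 64-bit variable the fragment below is accepted,
// while a fragment of (0, 64) would be rejected as covering the entire
// variable and (48, 32) as running past its end:
//   !DIExpression(DW_OP_LLVM_fragment, 0, 32)   ; offset 0, size 32 bits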
4565 
4566 void Verifier::verifyFnArgs(const DbgInfoIntrinsic &I) {
4567  // This function does not take the scope of noninlined function arguments into
4568  // account. Don't run it if current function is nodebug, because it may
4569  // contain inlined debug intrinsics.
4570  if (!HasDebugInfo)
4571  return;
4572 
4573  // For performance reasons only check non-inlined ones.
4574  if (I.getDebugLoc()->getInlinedAt())
4575  return;
4576 
4577  DILocalVariable *Var = I.getVariable();
4578  AssertDI(Var, "dbg intrinsic without variable");
4579 
4580  unsigned ArgNo = Var->getArg();
4581  if (!ArgNo)
4582  return;
4583 
4584  // Verify there are no duplicate function argument debug info entries.
4585  // These will cause hard-to-debug assertions in the DWARF backend.
4586  if (DebugFnArgs.size() < ArgNo)
4587  DebugFnArgs.resize(ArgNo, nullptr);
4588 
4589  auto *Prev = DebugFnArgs[ArgNo - 1];
4590  DebugFnArgs[ArgNo - 1] = Var;
4591  AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
4592  Prev, Var);
4593 }
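// For illustration (hypothetical metadata), two non-inlined dbg intrinsics
// whose variables both claim to describe argument 1:
//   !10 = !DILocalVariable(name: "a", arg: 1, scope: !9, ...)
//   !11 = !DILocalVariable(name: "b", arg: 1, scope: !9, ...)
// would trip the "conflicting debug info for argument" check above.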
4594 
4595 void Verifier::verifyCompileUnits() {
4596  auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
4597  SmallPtrSet<const Metadata *, 2> Listed;
4598  if (CUs)
4599  Listed.insert(CUs->op_begin(), CUs->op_end());
4600  for (auto *CU : CUVisited)
4601  AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
4602  CUVisited.clear();
4603 }
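// For illustration, every DICompileUnit reachable from debug metadata must
// also appear in the module-level named metadata list:
//   !llvm.dbg.cu = !{!0}
//   !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, ...)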
4604 
4605 void Verifier::verifyDeoptimizeCallingConvs() {
4606  if (DeoptimizeDeclarations.empty())
4607  return;
4608 
4609  const Function *First = DeoptimizeDeclarations[0];
4610  for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
4611  Assert(First->getCallingConv() == F->getCallingConv(),
4612  "All llvm.experimental.deoptimize declarations must have the same "
4613  "calling convention",
4614  First, F);
4615  }
4616 }
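// For illustration, mixing calling conventions across the overloaded
// declarations, e.g.
//   declare fastcc i32 @llvm.experimental.deoptimize.i32(...)
//   declare        i64 @llvm.experimental.deoptimize.i64(...)
// is rejected by the check above.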
4617 
4618 //===----------------------------------------------------------------------===//
4619 // Implement the public interfaces to this file...
4620 //===----------------------------------------------------------------------===//
4621 
4623  Function &F = const_cast<Function &>(f);
4624 
4625  // Don't use a raw_null_ostream. Printing IR is expensive.
4626  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
4627 
4628  // Note that this function's return value is inverted from what you would
4629  // expect of a function called "verify".
4630  return !V.verify(F);
4631 }
4632 
4633 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
4634  bool *BrokenDebugInfo) {
4635  // Don't use a raw_null_ostream. Printing IR is expensive.
4636  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
4637 
4638  bool Broken = false;
4639  for (const Function &F : M)
4640  Broken |= !V.verify(F);
4641 
4642  Broken |= !V.verify();
4643  if (BrokenDebugInfo)
4644  *BrokenDebugInfo = V.hasBrokenDebugInfo();
4645  // Note that this function's return value is inverted from what you would
4646  // expect of a function called "verify".
4647  return Broken;
4648 }
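// For illustration, typical client-side use of these entry points (a minimal
// sketch; `M` is assumed to be an existing llvm::Module):
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/ErrorHandling.h"
//   #include "llvm/Support/raw_ostream.h"
//   bool BrokenDebugInfo = false;
//   if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
//     llvm::report_fatal_error("module failed verification");
//   else if (BrokenDebugInfo)
//     llvm::errs() << "only the debug info is malformed\n";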
4649 
4650 namespace {
4651 
4652 struct VerifierLegacyPass : public FunctionPass {
4653  static char ID;
4654 
4655  std::unique_ptr<Verifier> V;
4656  bool FatalErrors = true;
4657 
4658  VerifierLegacyPass() : FunctionPass(ID) {
4659  initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4660  }
4661  explicit VerifierLegacyPass(bool FatalErrors)
4662  : FunctionPass(ID),
4663  FatalErrors(FatalErrors) {
4664  initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4665  }
4666 
4667  bool doInitialization(Module &M) override {
4668  V = llvm::make_unique<Verifier>(
4669  &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
4670  return false;
4671  }
4672 
4673  bool runOnFunction(Function &F) override {
4674  if (!V->verify(F) && FatalErrors)
4675  report_fatal_error("Broken function found, compilation aborted!");
4676 
4677  return false;
4678  }
4679 
4680  bool doFinalization(Module &M) override {
4681  bool HasErrors = false;
4682  for (Function &F : M)
4683  if (F.isDeclaration())
4684  HasErrors |= !V->verify(F);
4685 
4686  HasErrors |= !V->verify();
4687  if (FatalErrors) {
4688  if (HasErrors)
4689  report_fatal_error("Broken module found, compilation aborted!");
4690  assert(!V->hasBrokenDebugInfo() && "Module contains invalid debug info");
4691  }
4692 
4693  // Strip broken debug info.
4694  if (V->hasBrokenDebugInfo()) {
4695  DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M);
4696  M.getContext().diagnose(DiagInvalid);
4697  if (!StripDebugInfo(M))
4698  report_fatal_error("Failed to strip malformed debug info");
4699  }
4700  return false;
4701  }
4702 
4703  void getAnalysisUsage(AnalysisUsage &AU) const override {
4704  AU.setPreservesAll();
4705  }
4706 };
4707 
4708 } // end anonymous namespace
4709 
4710 /// Helper to issue failure from the TBAA verification
4711 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
4712  if (Diagnostic)
4713  return Diagnostic->CheckFailed(Args...);
4714 }
4715 
4716 #define AssertTBAA(C, ...) \
4717  do { \
4718  if (!(C)) { \
4719  CheckFailed(__VA_ARGS__); \
4720  return false; \
4721  } \
4722  } while (false)
4723 
4724 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
4725 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a
4726 /// struct-type node describing an aggregate data structure (like a struct).
4727 TBAAVerifier::TBAABaseNodeSummary
4728 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode) {
4729  if (BaseNode->getNumOperands() < 2) {
4730  CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
4731  return {true, ~0u};
4732  }
4733 
4734  auto Itr = TBAABaseNodes.find(BaseNode);
4735  if (Itr != TBAABaseNodes.end())
4736  return Itr->second;
4737 
4738  auto Result = verifyTBAABaseNodeImpl(I, BaseNode);
4739  auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
4740  (void)InsertResult;
4741  assert(InsertResult.second && "We just checked!");
4742  return Result;
4743 }
4744 
4745 TBAAVerifier::TBAABaseNodeSummary
4746 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode) {
4747  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
4748 
4749  if (BaseNode->getNumOperands() == 2) {
4750  // Scalar nodes can only be accessed at offset 0.
4751  return isValidScalarTBAANode(BaseNode)
4752  ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
4753  : InvalidNode;
4754  }
4755 
4756  if (BaseNode->getNumOperands() % 2 != 1) {
4757  CheckFailed("Struct tag nodes must have an odd number of operands!",
4758  BaseNode);
4759  return InvalidNode;
4760  }
4761 
4762  if (!isa<MDString>(BaseNode->getOperand(0))) {
4763  CheckFailed("Struct tag nodes have a string as their first operand",
4764  BaseNode);
4765  return InvalidNode;
4766  }
4767 
4768  bool Failed = false;
4769 
4770  Optional<APInt> PrevOffset;
4771  unsigned BitWidth = ~0u;
4772 
4773  // We've already checked that BaseNode is not a degenerate root node with one
4774  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
4775  for (unsigned Idx = 1; Idx < BaseNode->getNumOperands(); Idx += 2) {
4776  const MDOperand &FieldTy = BaseNode->getOperand(Idx);
4777  const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
4778  if (!isa<MDNode>(FieldTy)) {
4779  CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
4780  Failed = true;
4781  continue;
4782  }
4783 
4784  auto *OffsetEntryCI =
4785  mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
4786  if (!OffsetEntryCI) {
4787  CheckFailed("Offset entries must be constants!", &I, BaseNode);
4788  Failed = true;
4789  continue;
4790  }
4791 
4792  if (BitWidth == ~0u)
4793  BitWidth = OffsetEntryCI->getBitWidth();
4794 
4795  if (OffsetEntryCI->getBitWidth() != BitWidth) {
4796  CheckFailed(
4797  "Bitwidth between the offsets and struct type entries must match", &I,
4798  BaseNode);
4799  Failed = true;
4800  continue;
4801  }
4802 
4803  // NB! As far as I can tell, we generate a non-strictly increasing offset
4804  // sequence only from structs that have zero size bit fields. When
4805  // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
4806  // pick the field lexically the latest in struct type metadata node. This
4807  // mirrors the actual behavior of the alias analysis implementation.
4808  bool IsAscending =
4809  !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
4810 
4811  if (!IsAscending) {
4812  CheckFailed("Offsets must be increasing!", &I, BaseNode);
4813  Failed = true;
4814  }
4815 
4816  PrevOffset = OffsetEntryCI->getValue();
4817  }
4818 
4819  return Failed ? InvalidNode
4820  : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
4821 }
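// For illustration (metadata numbers are hypothetical), a struct-type base
// node this routine accepts: a name string followed by (field, offset) pairs
// with constant, increasing offsets of a single bit width:
//   !1 = !{!"omnipotent char", !0, i64 0}          ; scalar node
//   !2 = !{!"int", !1, i64 0}                      ; scalar node
//   !3 = !{!"pair", !2, i64 0, !2, i64 4}          ; struct-type node, 2 fields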
4822 
4823 static bool IsRootTBAANode(const MDNode *MD) {
4824  return MD->getNumOperands() < 2;
4825 }
4826 
4827 static bool IsScalarTBAANodeImpl(const MDNode *MD,
4829  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
4830  return false;
4831 
4832  if (!isa<MDString>(MD->getOperand(0)))
4833  return false;
4834 
4835  if (MD->getNumOperands() == 3) {
4836  auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
4837  if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
4838  return false;
4839  }
4840 
4841  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
4842  return Parent && Visited.insert(Parent).second &&
4843  (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
4844 }
4845 
4846 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
4847  auto ResultIt = TBAAScalarNodes.find(MD);
4848  if (ResultIt != TBAAScalarNodes.end())
4849  return ResultIt->second;
4850 
4851  SmallPtrSet<const MDNode *, 4> Visited;
4852  bool Result = IsScalarTBAANodeImpl(MD, Visited);
4853  auto InsertResult = TBAAScalarNodes.insert({MD, Result});
4854  (void)InsertResult;
4855  assert(InsertResult.second && "Just checked!");
4856 
4857  return Result;
4858 }
4859 
4860 /// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
4861 /// Offset in place to be the offset within the field node returned.
4862 ///
4863 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
4864 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
4865  const MDNode *BaseNode,
4866  APInt &Offset) {
4867  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
4868 
4869  // Scalar nodes have only one possible "field" -- their parent in the access
4870  // hierarchy. Offset must be zero at this point, but our caller is supposed
4871  // to Assert that.
4872  if (BaseNode->getNumOperands() == 2)
4873  return cast<MDNode>(BaseNode->getOperand(1));
4874 
4875  for (unsigned Idx = 1; Idx < BaseNode->getNumOperands(); Idx += 2) {
4876  auto *OffsetEntryCI =
4877  mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
4878  if (OffsetEntryCI->getValue().ugt(Offset)) {
4879  if (Idx == 1) {
4880  CheckFailed("Could not find TBAA parent in struct type node", &I,
4881  BaseNode, &Offset);
4882  return nullptr;
4883  }
4884 
4885  auto *PrevOffsetEntryCI =
4886  mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx - 1));
4887  Offset -= PrevOffsetEntryCI->getValue();
4888  return cast<MDNode>(BaseNode->getOperand(Idx - 2));
4889  }
4890  }
4891 
4892  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
4893  BaseNode->getOperand(BaseNode->getNumOperands() - 1));
4894 
4895  Offset -= LastOffsetEntryCI->getValue();
4896  return cast<MDNode>(BaseNode->getOperand(BaseNode->getNumOperands() - 2));
4897 }
4898 
4900  AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
4901  isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
4902  isa<AtomicCmpXchgInst>(I),
4903  "TBAA is only for loads, stores and calls!", &I);
4904 
4905  bool IsStructPathTBAA =
4906  isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
4907 
4908  AssertTBAA(
4909  IsStructPathTBAA,
4910  "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
4911 
4912  AssertTBAA(MD->getNumOperands() < 5,
4913  "Struct tag metadata must have either 3 or 4 operands", &I, MD);
4914 
4915  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
4916  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
4917 
4918  if (MD->getNumOperands() == 4) {
4919  auto *IsImmutableCI =
4920  mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(3));
4921  AssertTBAA(IsImmutableCI,
4922  "Immutability tag on struct tag metadata must be a constant", &I,
4923  MD);
4924  AssertTBAA(
4925  IsImmutableCI->isZero() || IsImmutableCI->isOne(),
4926  "Immutability part of the struct tag metadata must be either 0 or 1",
4927  &I, MD);
4928  }
4929 
4930  AssertTBAA(BaseNode && AccessType,
4931  "Malformed struct tag metadata: base and access-type "
4932  "should be non-null and point to Metadata nodes",
4933  &I, MD, BaseNode, AccessType);
4934 
4935  AssertTBAA(isValidScalarTBAANode(AccessType),
4936  "Access type node must be a valid scalar type", &I, MD,
4937  AccessType);
4938 
4939  auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
4940  AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
4941 
4942  APInt Offset = OffsetCI->getValue();
4943  bool SeenAccessTypeInPath = false;
4944 
4945  SmallPtrSet<MDNode *, 4> StructPath;
4946 
4947  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
4948  BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset)) {
4949  if (!StructPath.insert(BaseNode).second) {
4950  CheckFailed("Cycle detected in struct path", &I, MD);
4951  return false;
4952  }
4953 
4954  bool Invalid;
4955  unsigned BaseNodeBitWidth;
4956  std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode);
4957 
4958  // If the base node is invalid in itself, then we've already printed all the
4959  // errors we wanted to print.
4960  if (Invalid)
4961  return false;
4962 
4963  SeenAccessTypeInPath |= BaseNode == AccessType;
4964 
4965  if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
4966  AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
4967  &I, MD, &Offset);
4968 
4969  AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
4970  (BaseNodeBitWidth == 0 && Offset == 0),
4971  "Access bit-width not the same as description bit-width", &I, MD,
4972  BaseNodeBitWidth, Offset.getBitWidth());
4973  }
4974 
4975  AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
4976  &I, MD);
4977  return true;
4978 }
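// For illustration (hypothetical metadata), a complete struct-path access tag
// that passes the checks above -- valid base node, valid scalar access type,
// constant offset, and the access type reached along the path:
//   store i32 0, i32* %f, !tbaa !4
//   !0 = !{!"Simple C/C++ TBAA"}                   ; root
//   !1 = !{!"omnipotent char", !0, i64 0}
//   !2 = !{!"int", !1, i64 0}
//   !3 = !{!"pair", !2, i64 0, !2, i64 4}          ; struct-type base node
//   !4 = !{!3, !2, i64 4}                          ; access second field at offset 4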
4979 
4980 char VerifierLegacyPass::ID = 0;
4981 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
4982 
4983 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
4984  return new VerifierLegacyPass(FatalErrors);
4985 }
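// For illustration, how the legacy pass is typically scheduled (a sketch;
// assumes a module `M` is already available):
//   #include "llvm/IR/LegacyPassManager.h"
//   llvm::legacy::PassManager PM;
//   PM.add(llvm::createVerifierPass(/*FatalErrors=*/true));
//   PM.run(M);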
4986 
4987 AnalysisKey VerifierAnalysis::Key;
4988 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
4989  ModuleAnalysisManager &) {
4990  Result Res;
4991  Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
4992  return Res;
4993 }
4994 
4995 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
4996  FunctionAnalysisManager &) {
4997  return { llvm::verifyFunction(F, &dbgs()), false };
4998 }
4999 
5000 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
5001  auto Res = AM.getResult<VerifierAnalysis>(M);
5002  if (FatalErrors) {
5003  if (Res.IRBroken)
5004  report_fatal_error("Broken module found, compilation aborted!");
5005  assert(!Res.DebugInfoBroken && "Module contains invalid debug info");
5006  }
5007 
5008  // Strip broken debug info.
5009  if (Res.DebugInfoBroken) {
5010  DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M);
5011  M.getContext().diagnose(DiagInvalid);
5012  if (!StripDebugInfo(M))
5013  report_fatal_error("Failed to strip malformed debug info");
5014  }
5015  return PreservedAnalyses::all();
5016 }
5017 
5018 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
5019  auto res = AM.getResult<VerifierAnalysis>(F);
5020  if (res.IRBroken && FatalErrors)
5021  report_fatal_error("Broken function found, compilation aborted!");
5022 
5023  return PreservedAnalyses::all();
5024 }