LLVM  9.0.0svn
Verifier.cpp
Go to the documentation of this file.
1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the function verifier interface, that can be used for some
10 // sanity checking of input to the system.
11 //
12 // Note that this does not provide full `Java style' security and verifications,
13 // instead it just tries to ensure that code is well-formed.
14 //
15 // * Both of a binary operator's parameters are of the same type
16 // * Verify that the indices of mem access instructions match other operands
17 // * Verify that arithmetic and other things are only performed on first-class
18 // types. Verify that shifts & logicals only happen on integrals f.e.
19 // * All of the constants in a switch statement are of the correct type
20 // * The code is in valid SSA form
21 // * It should be illegal to put a label into any other type (like a structure)
22 // or to return one. [except constant arrays!]
23 // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24 // * PHI nodes must have an entry for each predecessor, with no extras.
25 // * PHI nodes must be the first thing in a basic block, all grouped together
26 // * PHI nodes must have at least one entry
27 // * All basic blocks should only end with terminator insts, not contain them
28 // * The entry node to a function must not have predecessors
29 // * All Instructions must be embedded into a basic block
30 // * Functions cannot take a void-typed parameter
31 // * Verify that a function's argument list agrees with its declared type.
32 // * It is illegal to specify a name for a void value.
33 // * It is illegal to have an internal global value with no initializer
34 // * It is illegal to have a ret instruction that returns a value that does not
35 // agree with the function return value type.
36 // * Function call argument types match the function prototype
37 // * A landing pad is defined by a landingpad instruction, and can be jumped to
38 // only by the unwind edge of an invoke instruction.
39 // * A landingpad instruction must be the first non-PHI instruction in the
40 // block.
41 // * Landingpad instructions must be in a function with a personality function.
42 // * All other things that are tested by asserts spread about the code...
43 //
44 //===----------------------------------------------------------------------===//
45 
46 #include "llvm/IR/Verifier.h"
47 #include "llvm/ADT/APFloat.h"
48 #include "llvm/ADT/APInt.h"
49 #include "llvm/ADT/ArrayRef.h"
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/Optional.h"
53 #include "llvm/ADT/STLExtras.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/ADT/ilist.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/CallingConv.h"
68 #include "llvm/IR/Comdat.h"
69 #include "llvm/IR/Constant.h"
70 #include "llvm/IR/ConstantRange.h"
71 #include "llvm/IR/Constants.h"
72 #include "llvm/IR/DataLayout.h"
73 #include "llvm/IR/DebugInfo.h"
75 #include "llvm/IR/DebugLoc.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/Function.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/GlobalVariable.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/InstVisitor.h"
84 #include "llvm/IR/InstrTypes.h"
85 #include "llvm/IR/Instruction.h"
86 #include "llvm/IR/Instructions.h"
87 #include "llvm/IR/IntrinsicInst.h"
88 #include "llvm/IR/Intrinsics.h"
89 #include "llvm/IR/LLVMContext.h"
90 #include "llvm/IR/Metadata.h"
91 #include "llvm/IR/Module.h"
93 #include "llvm/IR/PassManager.h"
94 #include "llvm/IR/Statepoint.h"
95 #include "llvm/IR/Type.h"
96 #include "llvm/IR/Use.h"
97 #include "llvm/IR/User.h"
98 #include "llvm/IR/Value.h"
99 #include "llvm/Pass.h"
101 #include "llvm/Support/Casting.h"
103 #include "llvm/Support/Debug.h"
105 #include "llvm/Support/MathExtras.h"
107 #include <algorithm>
108 #include <cassert>
109 #include <cstdint>
110 #include <memory>
111 #include <string>
112 #include <utility>
113 
114 using namespace llvm;
115 
116 namespace llvm {
117 
120  const Module &M;
122  const DataLayout &DL;
124 
125  /// Track the brokenness of the module while recursively visiting.
126  bool Broken = false;
127  /// Broken debug info can be "recovered" from by stripping the debug info.
128  bool BrokenDebugInfo = false;
129  /// Whether to treat broken debug info as an error.
131 
132  explicit VerifierSupport(raw_ostream *OS, const Module &M)
133  : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}
134 
135 private:
136  void Write(const Module *M) {
137  *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
138  }
139 
140  void Write(const Value *V) {
141  if (V)
142  Write(*V);
143  }
144 
145  void Write(const Value &V) {
146  if (isa<Instruction>(V)) {
147  V.print(*OS, MST);
148  *OS << '\n';
149  } else {
150  V.printAsOperand(*OS, true, MST);
151  *OS << '\n';
152  }
153  }
154 
155  void Write(const Metadata *MD) {
156  if (!MD)
157  return;
158  MD->print(*OS, MST, &M);
159  *OS << '\n';
160  }
161 
162  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
163  Write(MD.get());
164  }
165 
166  void Write(const NamedMDNode *NMD) {
167  if (!NMD)
168  return;
169  NMD->print(*OS, MST);
170  *OS << '\n';
171  }
172 
173  void Write(Type *T) {
174  if (!T)
175  return;
176  *OS << ' ' << *T;
177  }
178 
179  void Write(const Comdat *C) {
180  if (!C)
181  return;
182  *OS << *C;
183  }
184 
185  void Write(const APInt *AI) {
186  if (!AI)
187  return;
188  *OS << *AI << '\n';
189  }
190 
191  void Write(const unsigned i) { *OS << i << '\n'; }
192 
193  template <typename T> void Write(ArrayRef<T> Vs) {
194  for (const T &V : Vs)
195  Write(V);
196  }
197 
198  template <typename T1, typename... Ts>
199  void WriteTs(const T1 &V1, const Ts &... Vs) {
200  Write(V1);
201  WriteTs(Vs...);
202  }
203 
204  template <typename... Ts> void WriteTs() {}
205 
206 public:
207  /// A check failed, so printout out the condition and the message.
208  ///
209  /// This provides a nice place to put a breakpoint if you want to see why
210  /// something is not correct.
211  void CheckFailed(const Twine &Message) {
212  if (OS)
213  *OS << Message << '\n';
214  Broken = true;
215  }
216 
217  /// A check failed (with values to print).
218  ///
219  /// This calls the Message-only version so that the above is easier to set a
220  /// breakpoint on.
221  template <typename T1, typename... Ts>
222  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
223  CheckFailed(Message);
224  if (OS)
225  WriteTs(V1, Vs...);
226  }
227 
228  /// A debug info check failed.
229  void DebugInfoCheckFailed(const Twine &Message) {
230  if (OS)
231  *OS << Message << '\n';
232  Broken |= TreatBrokenDebugInfoAsError;
233  BrokenDebugInfo = true;
234  }
235 
236  /// A debug info check failed (with values to print).
237  template <typename T1, typename... Ts>
238  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
239  const Ts &... Vs) {
240  DebugInfoCheckFailed(Message);
241  if (OS)
242  WriteTs(V1, Vs...);
243  }
244 };
245 
246 } // namespace llvm
247 
248 namespace {
249 
250 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
251  friend class InstVisitor<Verifier>;
252 
253  DominatorTree DT;
254 
255  /// When verifying a basic block, keep track of all of the
256  /// instructions we have seen so far.
257  ///
258  /// This allows us to do efficient dominance checks for the case when an
259  /// instruction has an operand that is an instruction in the same block.
260  SmallPtrSet<Instruction *, 16> InstsInThisBlock;
261 
262  /// Keep track of the metadata nodes that have been checked already.
264 
265  /// Keep track which DISubprogram is attached to which function.
266  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
267 
268  /// Track all DICompileUnits visited.
270 
271  /// The result type for a landingpad.
272  Type *LandingPadResultTy;
273 
274  /// Whether we've seen a call to @llvm.localescape in this function
275  /// already.
276  bool SawFrameEscape;
277 
278  /// Whether the current function has a DISubprogram attached to it.
279  bool HasDebugInfo = false;
280 
281  /// Whether source was present on the first DIFile encountered in each CU.
282  DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
283 
284  /// Stores the count of how many objects were passed to llvm.localescape for a
285  /// given function and the largest index passed to llvm.localrecover.
287 
288  // Maps catchswitches and cleanuppads that unwind to siblings to the
289  // terminators that indicate the unwind, used to detect cycles therein.
290  MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
291 
292  /// Cache of constants visited in search of ConstantExprs.
293  SmallPtrSet<const Constant *, 32> ConstantExprVisited;
294 
295  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
296  SmallVector<const Function *, 4> DeoptimizeDeclarations;
297 
298  // Verify that this GlobalValue is only used in this module.
299  // This map is used to avoid visiting uses twice. We can arrive at a user
300  // twice, if they have multiple operands. In particular for very large
301  // constant expressions, we can arrive at a particular user many times.
302  SmallPtrSet<const Value *, 32> GlobalValueVisited;
303 
304  // Keeps track of duplicate function argument debug info.
306 
307  TBAAVerifier TBAAVerifyHelper;
308 
309  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
310 
311 public:
312  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
313  const Module &M)
314  : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
315  SawFrameEscape(false), TBAAVerifyHelper(this) {
316  TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
317  }
318 
319  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
320 
321  bool verify(const Function &F) {
322  assert(F.getParent() == &M &&
323  "An instance of this class only works with a specific module!");
324 
325  // First ensure the function is well-enough formed to compute dominance
326  // information, and directly compute a dominance tree. We don't rely on the
327  // pass manager to provide this as it isolates us from a potentially
328  // out-of-date dominator tree and makes it significantly more complex to run
329  // this code outside of a pass manager.
330  // FIXME: It's really gross that we have to cast away constness here.
331  if (!F.empty())
332  DT.recalculate(const_cast<Function &>(F));
333 
334  for (const BasicBlock &BB : F) {
335  if (!BB.empty() && BB.back().isTerminator())
336  continue;
337 
338  if (OS) {
339  *OS << "Basic Block in function '" << F.getName()
340  << "' does not have terminator!\n";
341  BB.printAsOperand(*OS, true, MST);
342  *OS << "\n";
343  }
344  return false;
345  }
346 
347  Broken = false;
348  // FIXME: We strip const here because the inst visitor strips const.
349  visit(const_cast<Function &>(F));
350  verifySiblingFuncletUnwinds();
351  InstsInThisBlock.clear();
352  DebugFnArgs.clear();
353  LandingPadResultTy = nullptr;
354  SawFrameEscape = false;
355  SiblingFuncletInfo.clear();
356 
357  return !Broken;
358  }
359 
360  /// Verify the module that this instance of \c Verifier was initialized with.
361  bool verify() {
362  Broken = false;
363 
364  // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
365  for (const Function &F : M)
366  if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
367  DeoptimizeDeclarations.push_back(&F);
368 
369  // Now that we've visited every function, verify that we never asked to
370  // recover a frame index that wasn't escaped.
371  verifyFrameRecoverIndices();
372  for (const GlobalVariable &GV : M.globals())
373  visitGlobalVariable(GV);
374 
375  for (const GlobalAlias &GA : M.aliases())
376  visitGlobalAlias(GA);
377 
378  for (const NamedMDNode &NMD : M.named_metadata())
379  visitNamedMDNode(NMD);
380 
381  for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
382  visitComdat(SMEC.getValue());
383 
384  visitModuleFlags(M);
385  visitModuleIdents(M);
386  visitModuleCommandLines(M);
387 
388  verifyCompileUnits();
389 
390  verifyDeoptimizeCallingConvs();
391  DISubprogramAttachments.clear();
392  return !Broken;
393  }
394 
395 private:
396  // Verification methods...
397  void visitGlobalValue(const GlobalValue &GV);
398  void visitGlobalVariable(const GlobalVariable &GV);
399  void visitGlobalAlias(const GlobalAlias &GA);
400  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
401  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
402  const GlobalAlias &A, const Constant &C);
403  void visitNamedMDNode(const NamedMDNode &NMD);
404  void visitMDNode(const MDNode &MD);
405  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
406  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
407  void visitComdat(const Comdat &C);
408  void visitModuleIdents(const Module &M);
409  void visitModuleCommandLines(const Module &M);
410  void visitModuleFlags(const Module &M);
411  void visitModuleFlag(const MDNode *Op,
413  SmallVectorImpl<const MDNode *> &Requirements);
414  void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
415  void visitFunction(const Function &F);
416  void visitBasicBlock(BasicBlock &BB);
417  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
418  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
419 
420  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
421 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
422 #include "llvm/IR/Metadata.def"
423  void visitDIScope(const DIScope &N);
424  void visitDIVariable(const DIVariable &N);
425  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
426  void visitDITemplateParameter(const DITemplateParameter &N);
427 
428  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
429 
430  // InstVisitor overrides...
432  void visit(Instruction &I);
433 
434  void visitTruncInst(TruncInst &I);
435  void visitZExtInst(ZExtInst &I);
436  void visitSExtInst(SExtInst &I);
437  void visitFPTruncInst(FPTruncInst &I);
438  void visitFPExtInst(FPExtInst &I);
439  void visitFPToUIInst(FPToUIInst &I);
440  void visitFPToSIInst(FPToSIInst &I);
441  void visitUIToFPInst(UIToFPInst &I);
442  void visitSIToFPInst(SIToFPInst &I);
443  void visitIntToPtrInst(IntToPtrInst &I);
444  void visitPtrToIntInst(PtrToIntInst &I);
445  void visitBitCastInst(BitCastInst &I);
446  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
447  void visitPHINode(PHINode &PN);
448  void visitCallBase(CallBase &Call);
449  void visitUnaryOperator(UnaryOperator &U);
450  void visitBinaryOperator(BinaryOperator &B);
451  void visitICmpInst(ICmpInst &IC);
452  void visitFCmpInst(FCmpInst &FC);
453  void visitExtractElementInst(ExtractElementInst &EI);
454  void visitInsertElementInst(InsertElementInst &EI);
455  void visitShuffleVectorInst(ShuffleVectorInst &EI);
456  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
457  void visitCallInst(CallInst &CI);
458  void visitInvokeInst(InvokeInst &II);
459  void visitGetElementPtrInst(GetElementPtrInst &GEP);
460  void visitLoadInst(LoadInst &LI);
461  void visitStoreInst(StoreInst &SI);
462  void verifyDominatesUse(Instruction &I, unsigned i);
463  void visitInstruction(Instruction &I);
464  void visitTerminator(Instruction &I);
465  void visitBranchInst(BranchInst &BI);
466  void visitReturnInst(ReturnInst &RI);
467  void visitSwitchInst(SwitchInst &SI);
468  void visitIndirectBrInst(IndirectBrInst &BI);
469  void visitCallBrInst(CallBrInst &CBI);
470  void visitSelectInst(SelectInst &SI);
471  void visitUserOp1(Instruction &I);
472  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
473  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
474  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
475  void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
476  void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
477  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
478  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
479  void visitFenceInst(FenceInst &FI);
480  void visitAllocaInst(AllocaInst &AI);
481  void visitExtractValueInst(ExtractValueInst &EVI);
482  void visitInsertValueInst(InsertValueInst &IVI);
483  void visitEHPadPredecessors(Instruction &I);
484  void visitLandingPadInst(LandingPadInst &LPI);
485  void visitResumeInst(ResumeInst &RI);
486  void visitCatchPadInst(CatchPadInst &CPI);
487  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
488  void visitCleanupPadInst(CleanupPadInst &CPI);
489  void visitFuncletPadInst(FuncletPadInst &FPI);
490  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
491  void visitCleanupReturnInst(CleanupReturnInst &CRI);
492 
493  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
494  void verifySwiftErrorValue(const Value *SwiftErrorVal);
495  void verifyMustTailCall(CallInst &CI);
496  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
497  unsigned ArgNo, std::string &Suffix);
498  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
499  void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
500  const Value *V);
501  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
502  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
503  const Value *V, bool IsIntrinsic);
504  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
505 
506  void visitConstantExprsRecursively(const Constant *EntryC);
507  void visitConstantExpr(const ConstantExpr *CE);
508  void verifyStatepoint(const CallBase &Call);
509  void verifyFrameRecoverIndices();
510  void verifySiblingFuncletUnwinds();
511 
512  void verifyFragmentExpression(const DbgVariableIntrinsic &I);
513  template <typename ValueOrMetadata>
514  void verifyFragmentExpression(const DIVariable &V,
516  ValueOrMetadata *Desc);
517  void verifyFnArgs(const DbgVariableIntrinsic &I);
518 
519  /// Module-level debug info verification...
520  void verifyCompileUnits();
521 
522  /// Module-level verification that all @llvm.experimental.deoptimize
523  /// declarations share the same calling convention.
524  void verifyDeoptimizeCallingConvs();
525 
526  /// Verify all-or-nothing property of DIFile source attribute within a CU.
527  void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
528 };
529 
530 } // end anonymous namespace
531 
/// We know that cond should be true, if not print an error message.
/// NOTE: expands to an early `return`, so a failed check aborts the current
/// visit method after recording the failure.
#define Assert(C, ...) \
  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)

/// We know that a debug info condition should be true, if not print
/// an error message. Like Assert, but routes through DebugInfoCheckFailed so
/// the failure can be downgraded to "strippable" debug info breakage.
#define AssertDI(C, ...) \
  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
540 
541 void Verifier::visit(Instruction &I) {
542  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
543  Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
545 }
546 
547 // Helper to recursively iterate over indirect users. By
548 // returning false, the callback can ask to stop recursing
549 // further.
550 static void forEachUser(const Value *User,
552  llvm::function_ref<bool(const Value *)> Callback) {
553  if (!Visited.insert(User).second)
554  return;
555  for (const Value *TheNextUser : User->materialized_users())
556  if (Callback(TheNextUser))
557  forEachUser(TheNextUser, Visited, Callback);
558 }
559 
560 void Verifier::visitGlobalValue(const GlobalValue &GV) {
562  "Global is external, but doesn't have external or weak linkage!", &GV);
563 
565  "huge alignment values are unsupported", &GV);
566  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
567  "Only global variables can have appending linkage!", &GV);
568 
569  if (GV.hasAppendingLinkage()) {
570  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
571  Assert(GVar && GVar->getValueType()->isArrayTy(),
572  "Only global arrays can have appending linkage!", GVar);
573  }
574 
575  if (GV.isDeclarationForLinker())
576  Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
577 
578  if (GV.hasDLLImportStorageClass()) {
579  Assert(!GV.isDSOLocal(),
580  "GlobalValue with DLLImport Storage is dso_local!", &GV);
581 
582  Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
584  "Global is marked as dllimport, but not external", &GV);
585  }
586 
587  if (GV.hasLocalLinkage())
588  Assert(GV.isDSOLocal(),
589  "GlobalValue with private or internal linkage must be dso_local!",
590  &GV);
591 
592  if (!GV.hasDefaultVisibility() && !GV.hasExternalWeakLinkage())
593  Assert(GV.isDSOLocal(),
594  "GlobalValue with non default visibility must be dso_local!", &GV);
595 
596  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
597  if (const Instruction *I = dyn_cast<Instruction>(V)) {
598  if (!I->getParent() || !I->getParent()->getParent())
599  CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
600  I);
601  else if (I->getParent()->getParent()->getParent() != &M)
602  CheckFailed("Global is referenced in a different module!", &GV, &M, I,
603  I->getParent()->getParent(),
604  I->getParent()->getParent()->getParent());
605  return false;
606  } else if (const Function *F = dyn_cast<Function>(V)) {
607  if (F->getParent() != &M)
608  CheckFailed("Global is used by function in a different module", &GV, &M,
609  F, F->getParent());
610  return false;
611  }
612  return true;
613  });
614 }
615 
616 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
617  if (GV.hasInitializer()) {
618  Assert(GV.getInitializer()->getType() == GV.getValueType(),
619  "Global variable initializer type does not match global "
620  "variable type!",
621  &GV);
622  // If the global has common linkage, it must have a zero initializer and
623  // cannot be constant.
624  if (GV.hasCommonLinkage()) {
626  "'common' global must have a zero initializer!", &GV);
627  Assert(!GV.isConstant(), "'common' global may not be marked constant!",
628  &GV);
629  Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
630  }
631  }
632 
633  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
634  GV.getName() == "llvm.global_dtors")) {
636  "invalid linkage for intrinsic global variable", &GV);
637  // Don't worry about emitting an error for it not being an array,
638  // visitGlobalValue will complain on appending non-array.
639  if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
640  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
641  PointerType *FuncPtrTy =
643  getPointerTo(DL.getProgramAddressSpace());
644  // FIXME: Reject the 2-field form in LLVM 4.0.
645  Assert(STy &&
646  (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
647  STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
648  STy->getTypeAtIndex(1) == FuncPtrTy,
649  "wrong type for intrinsic global variable", &GV);
650  if (STy->getNumElements() == 3) {
651  Type *ETy = STy->getTypeAtIndex(2);
652  Assert(ETy->isPointerTy() &&
653  cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
654  "wrong type for intrinsic global variable", &GV);
655  }
656  }
657  }
658 
659  if (GV.hasName() && (GV.getName() == "llvm.used" ||
660  GV.getName() == "llvm.compiler.used")) {
662  "invalid linkage for intrinsic global variable", &GV);
663  Type *GVType = GV.getValueType();
664  if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
665  PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
666  Assert(PTy, "wrong type for intrinsic global variable", &GV);
667  if (GV.hasInitializer()) {
668  const Constant *Init = GV.getInitializer();
669  const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
670  Assert(InitArray, "wrong initalizer for intrinsic global variable",
671  Init);
672  for (Value *Op : InitArray->operands()) {
673  Value *V = Op->stripPointerCastsNoFollowAliases();
674  Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
675  isa<GlobalAlias>(V),
676  "invalid llvm.used member", V);
677  Assert(V->hasName(), "members of llvm.used must be named", V);
678  }
679  }
680  }
681  }
682 
683  // Visit any debug info attachments.
686  for (auto *MD : MDs) {
687  if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
688  visitDIGlobalVariableExpression(*GVE);
689  else
690  AssertDI(false, "!dbg attachment of global variable must be a "
691  "DIGlobalVariableExpression");
692  }
693 
694  if (!GV.hasInitializer()) {
695  visitGlobalValue(GV);
696  return;
697  }
698 
699  // Walk any aggregate initializers looking for bitcasts between address spaces
700  visitConstantExprsRecursively(GV.getInitializer());
701 
702  visitGlobalValue(GV);
703 }
704 
705 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
707  Visited.insert(&GA);
708  visitAliaseeSubExpr(Visited, GA, C);
709 }
710 
711 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
712  const GlobalAlias &GA, const Constant &C) {
713  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
714  Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
715  &GA);
716 
717  if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
718  Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
719 
720  Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
721  &GA);
722  } else {
723  // Only continue verifying subexpressions of GlobalAliases.
724  // Do not recurse into global initializers.
725  return;
726  }
727  }
728 
729  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
730  visitConstantExprsRecursively(CE);
731 
732  for (const Use &U : C.operands()) {
733  Value *V = &*U;
734  if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
735  visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
736  else if (const auto *C2 = dyn_cast<Constant>(V))
737  visitAliaseeSubExpr(Visited, GA, *C2);
738  }
739 }
740 
741 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
743  "Alias should have private, internal, linkonce, weak, linkonce_odr, "
744  "weak_odr, or external linkage!",
745  &GA);
746  const Constant *Aliasee = GA.getAliasee();
747  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
748  Assert(GA.getType() == Aliasee->getType(),
749  "Alias and aliasee types should match!", &GA);
750 
751  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
752  "Aliasee should be either GlobalValue or ConstantExpr", &GA);
753 
754  visitAliaseeSubExpr(GA, *Aliasee);
755 
756  visitGlobalValue(GA);
757 }
758 
759 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
760  // There used to be various other llvm.dbg.* nodes, but we don't support
761  // upgrading them and we want to reserve the namespace for future uses.
762  if (NMD.getName().startswith("llvm.dbg."))
763  AssertDI(NMD.getName() == "llvm.dbg.cu",
764  "unrecognized named metadata node in the llvm.dbg namespace",
765  &NMD);
766  for (const MDNode *MD : NMD.operands()) {
767  if (NMD.getName() == "llvm.dbg.cu")
768  AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
769 
770  if (!MD)
771  continue;
772 
773  visitMDNode(*MD);
774  }
775 }
776 
777 void Verifier::visitMDNode(const MDNode &MD) {
778  // Only visit each node once. Metadata can be mutually recursive, so this
779  // avoids infinite recursion here, as well as being an optimization.
780  if (!MDNodes.insert(&MD).second)
781  return;
782 
783  switch (MD.getMetadataID()) {
784  default:
785  llvm_unreachable("Invalid MDNode subclass");
786  case Metadata::MDTupleKind:
787  break;
788 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
789  case Metadata::CLASS##Kind: \
790  visit##CLASS(cast<CLASS>(MD)); \
791  break;
792 #include "llvm/IR/Metadata.def"
793  }
794 
795  for (const Metadata *Op : MD.operands()) {
796  if (!Op)
797  continue;
798  Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
799  &MD, Op);
800  if (auto *N = dyn_cast<MDNode>(Op)) {
801  visitMDNode(*N);
802  continue;
803  }
804  if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
805  visitValueAsMetadata(*V, nullptr);
806  continue;
807  }
808  }
809 
810  // Check these last, so we diagnose problems in operands first.
811  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
812  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
813 }
814 
815 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
816  Assert(MD.getValue(), "Expected valid value", &MD);
817  Assert(!MD.getValue()->getType()->isMetadataTy(),
818  "Unexpected metadata round-trip through values", &MD, MD.getValue());
819 
820  auto *L = dyn_cast<LocalAsMetadata>(&MD);
821  if (!L)
822  return;
823 
824  Assert(F, "function-local metadata used outside a function", L);
825 
826  // If this was an instruction, bb, or argument, verify that it is in the
827  // function that we expect.
828  Function *ActualF = nullptr;
829  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
830  Assert(I->getParent(), "function-local metadata not in basic block", L, I);
831  ActualF = I->getParent()->getParent();
832  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
833  ActualF = BB->getParent();
834  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
835  ActualF = A->getParent();
836  assert(ActualF && "Unimplemented function local metadata case!");
837 
838  Assert(ActualF == F, "function-local metadata used in wrong function", L);
839 }
840 
841 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
842  Metadata *MD = MDV.getMetadata();
843  if (auto *N = dyn_cast<MDNode>(MD)) {
844  visitMDNode(*N);
845  return;
846  }
847 
848  // Only visit each node once. Metadata can be mutually recursive, so this
849  // avoids infinite recursion here, as well as being an optimization.
850  if (!MDNodes.insert(MD).second)
851  return;
852 
853  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
854  visitValueAsMetadata(*V, F);
855 }
856 
857 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
858 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
859 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
860 
861 void Verifier::visitDILocation(const DILocation &N) {
862  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
863  "location requires a valid scope", &N, N.getRawScope());
864  if (auto *IA = N.getRawInlinedAt())
865  AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
866  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
867  AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
868 }
869 
870 void Verifier::visitGenericDINode(const GenericDINode &N) {
871  AssertDI(N.getTag(), "invalid tag", &N);
872 }
873 
874 void Verifier::visitDIScope(const DIScope &N) {
875  if (auto *F = N.getRawFile())
876  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
877 }
878 
879 void Verifier::visitDISubrange(const DISubrange &N) {
880  AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
881  auto Count = N.getCount();
882  AssertDI(Count, "Count must either be a signed constant or a DIVariable",
883  &N);
884  AssertDI(!Count.is<ConstantInt*>() ||
885  Count.get<ConstantInt*>()->getSExtValue() >= -1,
886  "invalid subrange count", &N);
887 }
888 
889 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
890  AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
891 }
892 
893 void Verifier::visitDIBasicType(const DIBasicType &N) {
894  AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
895  N.getTag() == dwarf::DW_TAG_unspecified_type,
896  "invalid tag", &N);
897  AssertDI(!(N.isBigEndian() && N.isLittleEndian()) ,
898  "has conflicting flags", &N);
899 }
900 
901 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
902  // Common scope checks.
903  visitDIScope(N);
904 
905  AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
906  N.getTag() == dwarf::DW_TAG_pointer_type ||
907  N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
908  N.getTag() == dwarf::DW_TAG_reference_type ||
909  N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
910  N.getTag() == dwarf::DW_TAG_const_type ||
911  N.getTag() == dwarf::DW_TAG_volatile_type ||
912  N.getTag() == dwarf::DW_TAG_restrict_type ||
913  N.getTag() == dwarf::DW_TAG_atomic_type ||
914  N.getTag() == dwarf::DW_TAG_member ||
915  N.getTag() == dwarf::DW_TAG_inheritance ||
916  N.getTag() == dwarf::DW_TAG_friend,
917  "invalid tag", &N);
918  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
919  AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
920  N.getRawExtraData());
921  }
922 
923  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
924  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
925  N.getRawBaseType());
926 
927  if (N.getDWARFAddressSpace()) {
928  AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
929  N.getTag() == dwarf::DW_TAG_reference_type,
930  "DWARF address space only applies to pointer or reference types",
931  &N);
932  }
933 }
934 
935 /// Detect mutually exclusive flags.
936 static bool hasConflictingReferenceFlags(unsigned Flags) {
937  return ((Flags & DINode::FlagLValueReference) &&
938  (Flags & DINode::FlagRValueReference)) ||
939  ((Flags & DINode::FlagTypePassByValue) &&
940  (Flags & DINode::FlagTypePassByReference));
941 }
942 
943 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
944  auto *Params = dyn_cast<MDTuple>(&RawParams);
945  AssertDI(Params, "invalid template params", &N, &RawParams);
946  for (Metadata *Op : Params->operands()) {
947  AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
948  &N, Params, Op);
949  }
950 }
951 
952 void Verifier::visitDICompositeType(const DICompositeType &N) {
953  // Common scope checks.
954  visitDIScope(N);
955 
956  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
957  N.getTag() == dwarf::DW_TAG_structure_type ||
958  N.getTag() == dwarf::DW_TAG_union_type ||
959  N.getTag() == dwarf::DW_TAG_enumeration_type ||
960  N.getTag() == dwarf::DW_TAG_class_type ||
961  N.getTag() == dwarf::DW_TAG_variant_part,
962  "invalid tag", &N);
963 
964  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
965  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
966  N.getRawBaseType());
967 
968  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
969  "invalid composite elements", &N, N.getRawElements());
970  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
971  N.getRawVTableHolder());
973  "invalid reference flags", &N);
974 
975  if (N.isVector()) {
976  const DINodeArray Elements = N.getElements();
977  AssertDI(Elements.size() == 1 &&
978  Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
979  "invalid vector, expected one element of type subrange", &N);
980  }
981 
982  if (auto *Params = N.getRawTemplateParams())
983  visitTemplateParams(N, *Params);
984 
985  if (N.getTag() == dwarf::DW_TAG_class_type ||
986  N.getTag() == dwarf::DW_TAG_union_type) {
987  AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
988  "class/union requires a filename", &N, N.getFile());
989  }
990 
991  if (auto *D = N.getRawDiscriminator()) {
992  AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
993  "discriminator can only appear on variant part");
994  }
995 }
996 
997 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
998  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
999  if (auto *Types = N.getRawTypeArray()) {
1000  AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1001  for (Metadata *Ty : N.getTypeArray()->operands()) {
1002  AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1003  }
1004  }
1006  "invalid reference flags", &N);
1007 }
1008 
1009 void Verifier::visitDIFile(const DIFile &N) {
1010  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1011  Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1012  if (Checksum) {
1013  AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1014  "invalid checksum kind", &N);
1015  size_t Size;
1016  switch (Checksum->Kind) {
1017  case DIFile::CSK_MD5:
1018  Size = 32;
1019  break;
1020  case DIFile::CSK_SHA1:
1021  Size = 40;
1022  break;
1023  }
1024  AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1025  AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1026  "invalid checksum", &N);
1027  }
1028 }
1029 
1030 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1031  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1032  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1033 
1034  // Don't bother verifying the compilation directory or producer string
1035  // as those could be empty.
1036  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1037  N.getRawFile());
1038  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1039  N.getFile());
1040 
1041  verifySourceDebugInfo(N, *N.getFile());
1042 
1044  "invalid emission kind", &N);
1045 
1046  if (auto *Array = N.getRawEnumTypes()) {
1047  AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1048  for (Metadata *Op : N.getEnumTypes()->operands()) {
1049  auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1050  AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1051  "invalid enum type", &N, N.getEnumTypes(), Op);
1052  }
1053  }
1054  if (auto *Array = N.getRawRetainedTypes()) {
1055  AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1056  for (Metadata *Op : N.getRetainedTypes()->operands()) {
1057  AssertDI(Op && (isa<DIType>(Op) ||
1058  (isa<DISubprogram>(Op) &&
1059  !cast<DISubprogram>(Op)->isDefinition())),
1060  "invalid retained type", &N, Op);
1061  }
1062  }
1063  if (auto *Array = N.getRawGlobalVariables()) {
1064  AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1065  for (Metadata *Op : N.getGlobalVariables()->operands()) {
1066  AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1067  "invalid global variable ref", &N, Op);
1068  }
1069  }
1070  if (auto *Array = N.getRawImportedEntities()) {
1071  AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1072  for (Metadata *Op : N.getImportedEntities()->operands()) {
1073  AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1074  &N, Op);
1075  }
1076  }
1077  if (auto *Array = N.getRawMacros()) {
1078  AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1079  for (Metadata *Op : N.getMacros()->operands()) {
1080  AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1081  }
1082  }
1083  CUVisited.insert(&N);
1084 }
1085 
/// Verify a DISubprogram node: tag, scope/file/type operands, template
/// parameters, declaration link, retained nodes, reference flags, thrown
/// types, and the definition-vs-declaration compile-unit invariants.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    // Without a file there is nothing a line number could refer to.
    AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
           N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // The declaration operand must be a subprogram declaration, never a
  // definition.
  if (auto *S = N.getRawDeclaration())
    AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
             "invalid subprogram declaration", &N, S);
  // Retained nodes keep this subprogram's local variables/labels alive;
  // they must live in a plain tuple.
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    AssertDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
               "invalid retained nodes, expected DILocalVariable or DILabel",
               &N, Node, Op);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);

  // Only definitions hang off a compile unit; declarations are reached via
  // the type hierarchy and must not duplicate that link.
  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
    AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    if (N.getFile())
      verifySourceDebugInfo(*N.getUnit(), *N.getFile());
  } else {
    // Subprogram declarations (part of the type hierarchy).
    AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
               Op);
  }

  // DIFlagAllCallsDescribed is only meaningful on a definition.
  if (N.areAllCallsDescribed())
    AssertDI(N.isDefinition(),
             "DIFlagAllCallsDescribed must be attached to a definition");
}
1139 
1140 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1141  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1142  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1143  "invalid local scope", &N, N.getRawScope());
1144  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1145  AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1146 }
1147 
1148 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1149  visitDILexicalBlockBase(N);
1150 
1151  AssertDI(N.getLine() || !N.getColumn(),
1152  "cannot have column info without line info", &N);
1153 }
1154 
// A DILexicalBlockFile only carries the common lexical-block invariants;
// it has no additional operands of its own to verify.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1158 
1159 void Verifier::visitDINamespace(const DINamespace &N) {
1160  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1161  if (auto *S = N.getRawScope())
1162  AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1163 }
1164 
1165 void Verifier::visitDIMacro(const DIMacro &N) {
1168  "invalid macinfo type", &N);
1169  AssertDI(!N.getName().empty(), "anonymous macro", &N);
1170  if (!N.getValue().empty()) {
1171  assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1172  }
1173 }
1174 
1175 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1177  "invalid macinfo type", &N);
1178  if (auto *F = N.getRawFile())
1179  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1180 
1181  if (auto *Array = N.getRawElements()) {
1182  AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1183  for (Metadata *Op : N.getElements()->operands()) {
1184  AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1185  }
1186  }
1187 }
1188 
1189 void Verifier::visitDIModule(const DIModule &N) {
1190  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1191  AssertDI(!N.getName().empty(), "anonymous module", &N);
1192 }
1193 
1194 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1195  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1196 }
1197 
1198 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1199  visitDITemplateParameter(N);
1200 
1201  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1202  &N);
1203 }
1204 
1205 void Verifier::visitDITemplateValueParameter(
1206  const DITemplateValueParameter &N) {
1207  visitDITemplateParameter(N);
1208 
1209  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1210  N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1211  N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1212  "invalid tag", &N);
1213 }
1214 
1215 void Verifier::visitDIVariable(const DIVariable &N) {
1216  if (auto *S = N.getRawScope())
1217  AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1218  if (auto *F = N.getRawFile())
1219  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1220 }
1221 
1222 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1223  // Checks common to all variables.
1224  visitDIVariable(N);
1225 
1226  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1227  AssertDI(!N.getName().empty(), "missing global variable name", &N);
1228  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1229  AssertDI(N.getType(), "missing global variable type", &N);
1230  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1231  AssertDI(isa<DIDerivedType>(Member),
1232  "invalid static data member declaration", &N, Member);
1233  }
1234 }
1235 
1236 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1237  // Checks common to all variables.
1238  visitDIVariable(N);
1239 
1240  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1241  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1242  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1243  "local variable requires a valid scope", &N, N.getRawScope());
1244  if (auto Ty = N.getType())
1245  AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1246 }
1247 
1248 void Verifier::visitDILabel(const DILabel &N) {
1249  if (auto *S = N.getRawScope())
1250  AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1251  if (auto *F = N.getRawFile())
1252  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1253 
1254  AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1255  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1256  "label requires a valid scope", &N, N.getRawScope());
1257 }
1258 
1259 void Verifier::visitDIExpression(const DIExpression &N) {
1260  AssertDI(N.isValid(), "invalid expression", &N);
1261 }
1262 
1263 void Verifier::visitDIGlobalVariableExpression(
1264  const DIGlobalVariableExpression &GVE) {
1265  AssertDI(GVE.getVariable(), "missing variable");
1266  if (auto *Var = GVE.getVariable())
1267  visitDIGlobalVariable(*Var);
1268  if (auto *Expr = GVE.getExpression()) {
1269  visitDIExpression(*Expr);
1270  if (auto Fragment = Expr->getFragmentInfo())
1271  verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1272  }
1273 }
1274 
1275 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1276  AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1277  if (auto *T = N.getRawType())
1278  AssertDI(isType(T), "invalid type ref", &N, T);
1279  if (auto *F = N.getRawFile())
1280  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1281 }
1282 
1283 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1284  AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1285  N.getTag() == dwarf::DW_TAG_imported_declaration,
1286  "invalid tag", &N);
1287  if (auto *S = N.getRawScope())
1288  AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1289  AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1290  N.getRawEntity());
1291 }
1292 
1293 void Verifier::visitComdat(const Comdat &C) {
1294  // The Module is invalid if the GlobalValue has private linkage. Entities
1295  // with private linkage don't have entries in the symbol table.
1296  if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1297  Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1298  GV);
1299 }
1300 
1301 void Verifier::visitModuleIdents(const Module &M) {
1302  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1303  if (!Idents)
1304  return;
1305 
1306  // llvm.ident takes a list of metadata entry. Each entry has only one string.
1307  // Scan each llvm.ident entry and make sure that this requirement is met.
1308  for (const MDNode *N : Idents->operands()) {
1309  Assert(N->getNumOperands() == 1,
1310  "incorrect number of operands in llvm.ident metadata", N);
1311  Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1312  ("invalid value for llvm.ident metadata entry operand"
1313  "(the operand should be a string)"),
1314  N->getOperand(0));
1315  }
1316 }
1317 
1318 void Verifier::visitModuleCommandLines(const Module &M) {
1319  const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1320  if (!CommandLines)
1321  return;
1322 
1323  // llvm.commandline takes a list of metadata entry. Each entry has only one
1324  // string. Scan each llvm.commandline entry and make sure that this
1325  // requirement is met.
1326  for (const MDNode *N : CommandLines->operands()) {
1327  Assert(N->getNumOperands() == 1,
1328  "incorrect number of operands in llvm.commandline metadata", N);
1329  Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1330  ("invalid value for llvm.commandline metadata entry operand"
1331  "(the operand should be a string)"),
1332  N->getOperand(0));
1333  }
1334 }
1335 
1336 void Verifier::visitModuleFlags(const Module &M) {
1337  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1338  if (!Flags) return;
1339 
1340  // Scan each flag, and track the flags and requirements.
1342  SmallVector<const MDNode*, 16> Requirements;
1343  for (const MDNode *MDN : Flags->operands())
1344  visitModuleFlag(MDN, SeenIDs, Requirements);
1345 
1346  // Validate that the requirements in the module are valid.
1347  for (const MDNode *Requirement : Requirements) {
1348  const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1349  const Metadata *ReqValue = Requirement->getOperand(1);
1350 
1351  const MDNode *Op = SeenIDs.lookup(Flag);
1352  if (!Op) {
1353  CheckFailed("invalid requirement on flag, flag is not present in module",
1354  Flag);
1355  continue;
1356  }
1357 
1358  if (Op->getOperand(2) != ReqValue) {
1359  CheckFailed(("invalid requirement on flag, "
1360  "flag does not have the required value"),
1361  Flag);
1362  continue;
1363  }
1364  }
1365 }
1366 
1367 void
1368 Verifier::visitModuleFlag(const MDNode *Op,
1370  SmallVectorImpl<const MDNode *> &Requirements) {
1371  // Each module flag should have three arguments, the merge behavior (a
1372  // constant int), the flag ID (an MDString), and the value.
1373  Assert(Op->getNumOperands() == 3,
1374  "incorrect number of operands in module flag", Op);
1376  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1377  Assert(
1378  mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1379  "invalid behavior operand in module flag (expected constant integer)",
1380  Op->getOperand(0));
1381  Assert(false,
1382  "invalid behavior operand in module flag (unexpected constant)",
1383  Op->getOperand(0));
1384  }
1385  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1386  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1387  Op->getOperand(1));
1388 
1389  // Sanity check the values for behaviors with additional requirements.
1390  switch (MFB) {
1391  case Module::Error:
1392  case Module::Warning:
1393  case Module::Override:
1394  // These behavior types accept any value.
1395  break;
1396 
1397  case Module::Max: {
1398  Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1399  "invalid value for 'max' module flag (expected constant integer)",
1400  Op->getOperand(2));
1401  break;
1402  }
1403 
1404  case Module::Require: {
1405  // The value should itself be an MDNode with two operands, a flag ID (an
1406  // MDString), and a value.
1407  MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1408  Assert(Value && Value->getNumOperands() == 2,
1409  "invalid value for 'require' module flag (expected metadata pair)",
1410  Op->getOperand(2));
1411  Assert(isa<MDString>(Value->getOperand(0)),
1412  ("invalid value for 'require' module flag "
1413  "(first value operand should be a string)"),
1414  Value->getOperand(0));
1415 
1416  // Append it to the list of requirements, to check once all module flags are
1417  // scanned.
1418  Requirements.push_back(Value);
1419  break;
1420  }
1421 
1422  case Module::Append:
1423  case Module::AppendUnique: {
1424  // These behavior types require the operand be an MDNode.
1425  Assert(isa<MDNode>(Op->getOperand(2)),
1426  "invalid value for 'append'-type module flag "
1427  "(expected a metadata node)",
1428  Op->getOperand(2));
1429  break;
1430  }
1431  }
1432 
1433  // Unless this is a "requires" flag, check the ID is unique.
1434  if (MFB != Module::Require) {
1435  bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1436  Assert(Inserted,
1437  "module flag identifiers must be unique (or of 'require' type)", ID);
1438  }
1439 
1440  if (ID->getString() == "wchar_size") {
1442  = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1443  Assert(Value, "wchar_size metadata requires constant integer argument");
1444  }
1445 
1446  if (ID->getString() == "Linker Options") {
1447  // If the llvm.linker.options named metadata exists, we assume that the
1448  // bitcode reader has upgraded the module flag. Otherwise the flag might
1449  // have been created by a client directly.
1450  Assert(M.getNamedMetadata("llvm.linker.options"),
1451  "'Linker Options' named metadata no longer supported");
1452  }
1453 
1454  if (ID->getString() == "CG Profile") {
1455  for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1456  visitModuleFlagCGProfileEntry(MDO);
1457  }
1458 }
1459 
1460 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1461  auto CheckFunction = [&](const MDOperand &FuncMDO) {
1462  if (!FuncMDO)
1463  return;
1464  auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1465  Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1466  FuncMDO);
1467  };
1468  auto Node = dyn_cast_or_null<MDNode>(MDO);
1469  Assert(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1470  CheckFunction(Node->getOperand(0));
1471  CheckFunction(Node->getOperand(1));
1472  auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1473  Assert(Count && Count->getType()->isIntegerTy(),
1474  "expected an integer constant", Node->getOperand(2));
1475 }
1476 
1477 /// Return true if this attribute kind only applies to functions.
1479  switch (Kind) {
1480  case Attribute::NoReturn:
1481  case Attribute::NoCfCheck:
1482  case Attribute::NoUnwind:
1483  case Attribute::NoInline:
1484  case Attribute::AlwaysInline:
1485  case Attribute::OptimizeForSize:
1486  case Attribute::StackProtect:
1487  case Attribute::StackProtectReq:
1488  case Attribute::StackProtectStrong:
1489  case Attribute::SafeStack:
1490  case Attribute::ShadowCallStack:
1491  case Attribute::NoRedZone:
1492  case Attribute::NoImplicitFloat:
1493  case Attribute::Naked:
1494  case Attribute::InlineHint:
1495  case Attribute::StackAlignment:
1496  case Attribute::UWTable:
1497  case Attribute::NonLazyBind:
1498  case Attribute::ReturnsTwice:
1499  case Attribute::SanitizeAddress:
1500  case Attribute::SanitizeHWAddress:
1501  case Attribute::SanitizeThread:
1502  case Attribute::SanitizeMemory:
1503  case Attribute::MinSize:
1504  case Attribute::NoDuplicate:
1505  case Attribute::Builtin:
1506  case Attribute::NoBuiltin:
1507  case Attribute::Cold:
1508  case Attribute::OptForFuzzing:
1509  case Attribute::OptimizeNone:
1510  case Attribute::JumpTable:
1511  case Attribute::Convergent:
1512  case Attribute::ArgMemOnly:
1513  case Attribute::NoRecurse:
1514  case Attribute::InaccessibleMemOnly:
1515  case Attribute::InaccessibleMemOrArgMemOnly:
1516  case Attribute::AllocSize:
1517  case Attribute::SpeculativeLoadHardening:
1518  case Attribute::Speculatable:
1519  case Attribute::StrictFP:
1520  return true;
1521  default:
1522  break;
1523  }
1524  return false;
1525 }
1526 
1527 /// Return true if this is a function attribute that can also appear on
1528 /// arguments.
1530  return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1531  Kind == Attribute::ReadNone;
1532 }
1533 
1534 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1535  const Value *V) {
1536  for (Attribute A : Attrs) {
1537  if (A.isStringAttribute())
1538  continue;
1539 
1540  if (isFuncOnlyAttr(A.getKindAsEnum())) {
1541  if (!IsFunction) {
1542  CheckFailed("Attribute '" + A.getAsString() +
1543  "' only applies to functions!",
1544  V);
1545  return;
1546  }
1547  } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1548  CheckFailed("Attribute '" + A.getAsString() +
1549  "' does not apply to functions!",
1550  V);
1551  return;
1552  }
1553  }
1554 }
1555 
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Enforces positional legality, pairwise-incompatible attribute
// combinations, type/attribute compatibility, and the pointer-type
// requirements of byval/inalloca/swifterror.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);

  // 'immarg' marks an intrinsic operand that must be an immediate; it makes
  // no sense combined with anything else.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    Assert(Attrs.getNumAttributes() == 1,
           "Attribute 'immarg' is incompatible with other attributes", V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
                         "and 'sret' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
           Attrs.hasAttribute(Attribute::ReadOnly)),
         "Attributes "
         "'inalloca and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
           Attrs.hasAttribute(Attribute::Returned)),
         "Attributes "
         "'sret and returned' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
           Attrs.hasAttribute(Attribute::SExt)),
         "Attributes "
         "'zeroext and signext' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
           Attrs.hasAttribute(Attribute::ReadOnly)),
         "Attributes "
         "'readnone and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
           Attrs.hasAttribute(Attribute::WriteOnly)),
         "Attributes "
         "'readnone and writeonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
           Attrs.hasAttribute(Attribute::WriteOnly)),
         "Attributes "
         "'readonly and writeonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
           Attrs.hasAttribute(Attribute::AlwaysInline)),
         "Attributes "
         "'noinline and alwaysinline' are incompatible!",
         V);

  // Reject attributes that are illegal for this value's type (e.g. pointer
  // attributes on a non-pointer).
  AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
  Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
         "Wrong types for attribute: " +
             AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
         V);

  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
    // byval/inalloca imply a copy of the pointee, which requires a sized
    // pointee type.
    SmallPtrSet<Type*, 4> Visited;
    if (!PTy->getElementType()->isSized(&Visited)) {
      Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
             !Attrs.hasAttribute(Attribute::InAlloca),
             "Attributes 'byval' and 'inalloca' do not support unsized types!",
             V);
    }
    // swifterror needs a pointer-to-pointer (it holds an error value slot).
    if (!isa<PointerType>(PTy->getElementType()))
      Assert(!Attrs.hasAttribute(Attribute::SwiftError),
             "Attribute 'swifterror' only applies to parameters "
             "with pointer to pointer type!",
             V);
  } else {
    Assert(!Attrs.hasAttribute(Attribute::ByVal),
           "Attribute 'byval' only applies to parameters with pointer type!",
           V);
    Assert(!Attrs.hasAttribute(Attribute::SwiftError),
           "Attribute 'swifterror' only applies to parameters "
           "with pointer type!",
           V);
  }
}
1653 
1654 // Check parameter attributes against a function type.
1655 // The value V is printed in error messages.
1656 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1657  const Value *V, bool IsIntrinsic) {
1658  if (Attrs.isEmpty())
1659  return;
1660 
1661  bool SawNest = false;
1662  bool SawReturned = false;
1663  bool SawSRet = false;
1664  bool SawSwiftSelf = false;
1665  bool SawSwiftError = false;
1666 
1667  // Verify return value attributes.
1668  AttributeSet RetAttrs = Attrs.getRetAttributes();
1669  Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1670  !RetAttrs.hasAttribute(Attribute::Nest) &&
1671  !RetAttrs.hasAttribute(Attribute::StructRet) &&
1672  !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1673  !RetAttrs.hasAttribute(Attribute::Returned) &&
1674  !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1675  !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1676  !RetAttrs.hasAttribute(Attribute::SwiftError)),
1677  "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
1678  "'returned', 'swiftself', and 'swifterror' do not apply to return "
1679  "values!",
1680  V);
1681  Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1682  !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1683  !RetAttrs.hasAttribute(Attribute::ReadNone)),
1684  "Attribute '" + RetAttrs.getAsString() +
1685  "' does not apply to function returns",
1686  V);
1687  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1688 
1689  // Verify parameter attributes.
1690  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1691  Type *Ty = FT->getParamType(i);
1692  AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1693 
1694  if (!IsIntrinsic) {
1695  Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg),
1696  "immarg attribute only applies to intrinsics",V);
1697  }
1698 
1699  verifyParameterAttrs(ArgAttrs, Ty, V);
1700 
1701  if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1702  Assert(!SawNest, "More than one parameter has attribute nest!", V);
1703  SawNest = true;
1704  }
1705 
1706  if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1707  Assert(!SawReturned, "More than one parameter has attribute returned!",
1708  V);
1710  "Incompatible argument and return types for 'returned' attribute",
1711  V);
1712  SawReturned = true;
1713  }
1714 
1715  if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1716  Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1717  Assert(i == 0 || i == 1,
1718  "Attribute 'sret' is not on first or second parameter!", V);
1719  SawSRet = true;
1720  }
1721 
1722  if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1723  Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1724  SawSwiftSelf = true;
1725  }
1726 
1727  if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1728  Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1729  V);
1730  SawSwiftError = true;
1731  }
1732 
1733  if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1734  Assert(i == FT->getNumParams() - 1,
1735  "inalloca isn't on the last parameter!", V);
1736  }
1737  }
1738 
1740  return;
1741 
1742  verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1743 
1744  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1745  Attrs.hasFnAttribute(Attribute::ReadOnly)),
1746  "Attributes 'readnone and readonly' are incompatible!", V);
1747 
1748  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1749  Attrs.hasFnAttribute(Attribute::WriteOnly)),
1750  "Attributes 'readnone and writeonly' are incompatible!", V);
1751 
1752  Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1753  Attrs.hasFnAttribute(Attribute::WriteOnly)),
1754  "Attributes 'readonly and writeonly' are incompatible!", V);
1755 
1756  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1757  Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1758  "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1759  "incompatible!",
1760  V);
1761 
1762  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1763  Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1764  "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1765 
1766  Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1767  Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1768  "Attributes 'noinline and alwaysinline' are incompatible!", V);
1769 
1770  if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1771  Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1772  "Attribute 'optnone' requires 'noinline'!", V);
1773 
1774  Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1775  "Attributes 'optsize and optnone' are incompatible!", V);
1776 
1777  Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1778  "Attributes 'minsize and optnone' are incompatible!", V);
1779  }
1780 
1781  if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1782  const GlobalValue *GV = cast<GlobalValue>(V);
1784  "Attribute 'jumptable' requires 'unnamed_addr'", V);
1785  }
1786 
1787  if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1788  std::pair<unsigned, Optional<unsigned>> Args =
1790 
1791  auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1792  if (ParamNo >= FT->getNumParams()) {
1793  CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1794  return false;
1795  }
1796 
1797  if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1798  CheckFailed("'allocsize' " + Name +
1799  " argument must refer to an integer parameter",
1800  V);
1801  return false;
1802  }
1803 
1804  return true;
1805  };
1806 
1807  if (!CheckParam("element size", Args.first))
1808  return;
1809 
1810  if (Args.second && !CheckParam("number of elements", *Args.second))
1811  return;
1812  }
1813 }
1814 
1815 void Verifier::verifyFunctionMetadata(
1816  ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1817  for (const auto &Pair : MDs) {
1818  if (Pair.first == LLVMContext::MD_prof) {
1819  MDNode *MD = Pair.second;
1820  Assert(MD->getNumOperands() >= 2,
1821  "!prof annotations should have no less than 2 operands", MD);
1822 
1823  // Check first operand.
1824  Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1825  MD);
1826  Assert(isa<MDString>(MD->getOperand(0)),
1827  "expected string with name of the !prof annotation", MD);
1828  MDString *MDS = cast<MDString>(MD->getOperand(0));
1829  StringRef ProfName = MDS->getString();
1830  Assert(ProfName.equals("function_entry_count") ||
1831  ProfName.equals("synthetic_function_entry_count"),
1832  "first operand should be 'function_entry_count'"
1833  " or 'synthetic_function_entry_count'",
1834  MD);
1835 
1836  // Check second operand.
1837  Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1838  MD);
1839  Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1840  "expected integer argument to function_entry_count", MD);
1841  }
1842  }
1843 }
1844 
1845 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1846  if (!ConstantExprVisited.insert(EntryC).second)
1847  return;
1848 
1850  Stack.push_back(EntryC);
1851 
1852  while (!Stack.empty()) {
1853  const Constant *C = Stack.pop_back_val();
1854 
1855  // Check this constant expression.
1856  if (const auto *CE = dyn_cast<ConstantExpr>(C))
1857  visitConstantExpr(CE);
1858 
1859  if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1860  // Global Values get visited separately, but we do need to make sure
1861  // that the global value is in the correct module
1862  Assert(GV->getParent() == &M, "Referencing global in another module!",
1863  EntryC, &M, GV, GV->getParent());
1864  continue;
1865  }
1866 
1867  // Visit all sub-expressions.
1868  for (const Use &U : C->operands()) {
1869  const auto *OpC = dyn_cast<Constant>(U);
1870  if (!OpC)
1871  continue;
1872  if (!ConstantExprVisited.insert(OpC).second)
1873  continue;
1874  Stack.push_back(OpC);
1875  }
1876  }
1877 }
1878 
1879 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1880  if (CE->getOpcode() == Instruction::BitCast)
1881  Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1882  CE->getType()),
1883  "Invalid bitcast", CE);
1884 
1885  if (CE->getOpcode() == Instruction::IntToPtr ||
1886  CE->getOpcode() == Instruction::PtrToInt) {
1887  auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1888  ? CE->getType()
1889  : CE->getOperand(0)->getType();
1890  StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1891  ? "inttoptr not supported for non-integral pointers"
1892  : "ptrtoint not supported for non-integral pointers";
1893  Assert(
1894  !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1895  Msg);
1896  }
1897 }
1898 
1899 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1900  // There shouldn't be more attribute sets than there are parameters plus the
1901  // function and return value.
1902  return Attrs.getNumAttrSets() <= Params + 2;
1903 }
1904 
1905 /// Verify that statepoint intrinsic is well formed.
1906 void Verifier::verifyStatepoint(const CallBase &Call) {
1907  assert(Call.getCalledFunction() &&
1908  Call.getCalledFunction()->getIntrinsicID() ==
1909  Intrinsic::experimental_gc_statepoint);
1910 
1911  Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
1912  !Call.onlyAccessesArgMemory(),
1913  "gc.statepoint must read and write all memory to preserve "
1914  "reordering restrictions required by safepoint semantics",
1915  Call);
1916 
1917  const int64_t NumPatchBytes =
1918  cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
1919  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1920  Assert(NumPatchBytes >= 0,
1921  "gc.statepoint number of patchable bytes must be "
1922  "positive",
1923  Call);
1924 
1925  const Value *Target = Call.getArgOperand(2);
1926  auto *PT = dyn_cast<PointerType>(Target->getType());
1927  Assert(PT && PT->getElementType()->isFunctionTy(),
1928  "gc.statepoint callee must be of function pointer type", Call, Target);
1929  FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1930 
1931  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
1932  Assert(NumCallArgs >= 0,
1933  "gc.statepoint number of arguments to underlying call "
1934  "must be positive",
1935  Call);
1936  const int NumParams = (int)TargetFuncType->getNumParams();
1937  if (TargetFuncType->isVarArg()) {
1938  Assert(NumCallArgs >= NumParams,
1939  "gc.statepoint mismatch in number of vararg call args", Call);
1940 
1941  // TODO: Remove this limitation
1942  Assert(TargetFuncType->getReturnType()->isVoidTy(),
1943  "gc.statepoint doesn't support wrapping non-void "
1944  "vararg functions yet",
1945  Call);
1946  } else
1947  Assert(NumCallArgs == NumParams,
1948  "gc.statepoint mismatch in number of call args", Call);
1949 
1950  const uint64_t Flags
1951  = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
1952  Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
1953  "unknown flag used in gc.statepoint flags argument", Call);
1954 
1955  // Verify that the types of the call parameter arguments match
1956  // the type of the wrapped callee.
1957  AttributeList Attrs = Call.getAttributes();
1958  for (int i = 0; i < NumParams; i++) {
1959  Type *ParamType = TargetFuncType->getParamType(i);
1960  Type *ArgType = Call.getArgOperand(5 + i)->getType();
1961  Assert(ArgType == ParamType,
1962  "gc.statepoint call argument does not match wrapped "
1963  "function type",
1964  Call);
1965 
1966  if (TargetFuncType->isVarArg()) {
1967  AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
1968  Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
1969  "Attribute 'sret' cannot be used for vararg call arguments!",
1970  Call);
1971  }
1972  }
1973 
1974  const int EndCallArgsInx = 4 + NumCallArgs;
1975 
1976  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
1977  Assert(isa<ConstantInt>(NumTransitionArgsV),
1978  "gc.statepoint number of transition arguments "
1979  "must be constant integer",
1980  Call);
1981  const int NumTransitionArgs =
1982  cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
1983  Assert(NumTransitionArgs >= 0,
1984  "gc.statepoint number of transition arguments must be positive", Call);
1985  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
1986 
1987  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
1988  Assert(isa<ConstantInt>(NumDeoptArgsV),
1989  "gc.statepoint number of deoptimization arguments "
1990  "must be constant integer",
1991  Call);
1992  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
1993  Assert(NumDeoptArgs >= 0,
1994  "gc.statepoint number of deoptimization arguments "
1995  "must be positive",
1996  Call);
1997 
1998  const int ExpectedNumArgs =
1999  7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2000  Assert(ExpectedNumArgs <= (int)Call.arg_size(),
2001  "gc.statepoint too few arguments according to length fields", Call);
2002 
2003  // Check that the only uses of this gc.statepoint are gc.result or
2004  // gc.relocate calls which are tied to this statepoint and thus part
2005  // of the same statepoint sequence
2006  for (const User *U : Call.users()) {
2007  const CallInst *UserCall = dyn_cast<const CallInst>(U);
2008  Assert(UserCall, "illegal use of statepoint token", Call, U);
2009  if (!UserCall)
2010  continue;
2011  Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2012  "gc.result or gc.relocate are the only value uses "
2013  "of a gc.statepoint",
2014  Call, U);
2015  if (isa<GCResultInst>(UserCall)) {
2016  Assert(UserCall->getArgOperand(0) == &Call,
2017  "gc.result connected to wrong gc.statepoint", Call, UserCall);
2018  } else if (isa<GCRelocateInst>(Call)) {
2019  Assert(UserCall->getArgOperand(0) == &Call,
2020  "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2021  }
2022  }
2023 
2024  // Note: It is legal for a single derived pointer to be listed multiple
2025  // times. It's non-optimal, but it is legal. It can also happen after
2026  // insertion if we strip a bitcast away.
2027  // Note: It is really tempting to check that each base is relocated and
2028  // that a derived pointer is never reused as a base pointer. This turns
2029  // out to be problematic since optimizations run after safepoint insertion
2030  // can recognize equality properties that the insertion logic doesn't know
2031  // about. See example statepoint.ll in the verifier subdirectory
2032 }
2033 
2034 void Verifier::verifyFrameRecoverIndices() {
2035  for (auto &Counts : FrameEscapeInfo) {
2036  Function *F = Counts.first;
2037  unsigned EscapedObjectCount = Counts.second.first;
2038  unsigned MaxRecoveredIndex = Counts.second.second;
2039  Assert(MaxRecoveredIndex <= EscapedObjectCount,
2040  "all indices passed to llvm.localrecover must be less than the "
2041  "number of arguments passed to llvm.localescape in the parent "
2042  "function",
2043  F);
2044  }
2045 }
2046 
2048  BasicBlock *UnwindDest;
2049  if (auto *II = dyn_cast<InvokeInst>(Terminator))
2050  UnwindDest = II->getUnwindDest();
2051  else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2052  UnwindDest = CSI->getUnwindDest();
2053  else
2054  UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2055  return UnwindDest->getFirstNonPHI();
2056 }
2057 
2058 void Verifier::verifySiblingFuncletUnwinds() {
2061  for (const auto &Pair : SiblingFuncletInfo) {
2062  Instruction *PredPad = Pair.first;
2063  if (Visited.count(PredPad))
2064  continue;
2065  Active.insert(PredPad);
2066  Instruction *Terminator = Pair.second;
2067  do {
2068  Instruction *SuccPad = getSuccPad(Terminator);
2069  if (Active.count(SuccPad)) {
2070  // Found a cycle; report error
2071  Instruction *CyclePad = SuccPad;
2072  SmallVector<Instruction *, 8> CycleNodes;
2073  do {
2074  CycleNodes.push_back(CyclePad);
2075  Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2076  if (CycleTerminator != CyclePad)
2077  CycleNodes.push_back(CycleTerminator);
2078  CyclePad = getSuccPad(CycleTerminator);
2079  } while (CyclePad != SuccPad);
2080  Assert(false, "EH pads can't handle each other's exceptions",
2081  ArrayRef<Instruction *>(CycleNodes));
2082  }
2083  // Don't re-walk a node we've already checked
2084  if (!Visited.insert(SuccPad).second)
2085  break;
2086  // Walk to this successor if it has a map entry.
2087  PredPad = SuccPad;
2088  auto TermI = SiblingFuncletInfo.find(PredPad);
2089  if (TermI == SiblingFuncletInfo.end())
2090  break;
2091  Terminator = TermI->second;
2092  Active.insert(PredPad);
2093  } while (true);
2094  // Each node only has one successor, so we've walked all the active
2095  // nodes' successors.
2096  Active.clear();
2097  }
2098 }
2099 
2100 // visitFunction - Verify that a function is ok.
2101 //
2102 void Verifier::visitFunction(const Function &F) {
2103  visitGlobalValue(F);
2104 
2105  // Check function arguments.
2106  FunctionType *FT = F.getFunctionType();
2107  unsigned NumArgs = F.arg_size();
2108 
2109  Assert(&Context == &F.getContext(),
2110  "Function context does not match Module context!", &F);
2111 
2112  Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2113  Assert(FT->getNumParams() == NumArgs,
2114  "# formal arguments must match # of arguments for function type!", &F,
2115  FT);
2116  Assert(F.getReturnType()->isFirstClassType() ||
2117  F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2118  "Functions cannot return aggregate values!", &F);
2119 
2120  Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2121  "Invalid struct return type!", &F);
2122 
2123  AttributeList Attrs = F.getAttributes();
2124 
2125  Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2126  "Attribute after last parameter!", &F);
2127 
2128  bool isLLVMdotName = F.getName().size() >= 5 &&
2129  F.getName().substr(0, 5) == "llvm.";
2130 
2131  // Check function attributes.
2132  verifyFunctionAttrs(FT, Attrs, &F, isLLVMdotName);
2133 
2134  // On function declarations/definitions, we do not support the builtin
2135  // attribute. We do not check this in VerifyFunctionAttrs since that is
2136  // checking for Attributes that can/can not ever be on functions.
2137  Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2138  "Attribute 'builtin' can only be applied to a callsite.", &F);
2139 
2140  // Check that this function meets the restrictions on this calling convention.
2141  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2142  // restrictions can be lifted.
2143  switch (F.getCallingConv()) {
2144  default:
2145  case CallingConv::C:
2146  break;
2149  Assert(F.getReturnType()->isVoidTy(),
2150  "Calling convention requires void return type", &F);
2157  Assert(!F.hasStructRetAttr(),
2158  "Calling convention does not allow sret", &F);
2160  case CallingConv::Fast:
2161  case CallingConv::Cold:
2165  Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2166  "perfect forwarding!",
2167  &F);
2168  break;
2169  }
2170 
2171  // Check that the argument values match the function type for this function...
2172  unsigned i = 0;
2173  for (const Argument &Arg : F.args()) {
2174  Assert(Arg.getType() == FT->getParamType(i),
2175  "Argument value does not match function argument type!", &Arg,
2176  FT->getParamType(i));
2177  Assert(Arg.getType()->isFirstClassType(),
2178  "Function arguments must have first-class types!", &Arg);
2179  if (!isLLVMdotName) {
2180  Assert(!Arg.getType()->isMetadataTy(),
2181  "Function takes metadata but isn't an intrinsic", &Arg, &F);
2182  Assert(!Arg.getType()->isTokenTy(),
2183  "Function takes token but isn't an intrinsic", &Arg, &F);
2184  }
2185 
2186  // Check that swifterror argument is only used by loads and stores.
2187  if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2188  verifySwiftErrorValue(&Arg);
2189  }
2190  ++i;
2191  }
2192 
2193  if (!isLLVMdotName)
2194  Assert(!F.getReturnType()->isTokenTy(),
2195  "Functions returns a token but isn't an intrinsic", &F);
2196 
2197  // Get the function metadata attachments.
2199  F.getAllMetadata(MDs);
2200  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2201  verifyFunctionMetadata(MDs);
2202 
2203  // Check validity of the personality function
2204  if (F.hasPersonalityFn()) {
2205  auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2206  if (Per)
2207  Assert(Per->getParent() == F.getParent(),
2208  "Referencing personality function in another module!",
2209  &F, F.getParent(), Per, Per->getParent());
2210  }
2211 
2212  if (F.isMaterializable()) {
2213  // Function has a body somewhere we can't see.
2214  Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2215  MDs.empty() ? nullptr : MDs.front().second);
2216  } else if (F.isDeclaration()) {
2217  for (const auto &I : MDs) {
2218  AssertDI(I.first != LLVMContext::MD_dbg,
2219  "function declaration may not have a !dbg attachment", &F);
2220  Assert(I.first != LLVMContext::MD_prof,
2221  "function declaration may not have a !prof attachment", &F);
2222 
2223  // Verify the metadata itself.
2224  visitMDNode(*I.second);
2225  }
2226  Assert(!F.hasPersonalityFn(),
2227  "Function declaration shouldn't have a personality routine", &F);
2228  } else {
2229  // Verify that this function (which has a body) is not named "llvm.*". It
2230  // is not legal to define intrinsics.
2231  Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2232 
2233  // Check the entry node
2234  const BasicBlock *Entry = &F.getEntryBlock();
2235  Assert(pred_empty(Entry),
2236  "Entry block to function must not have predecessors!", Entry);
2237 
2238  // The address of the entry block cannot be taken, unless it is dead.
2239  if (Entry->hasAddressTaken()) {
2240  Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2241  "blockaddress may not be used with the entry block!", Entry);
2242  }
2243 
2244  unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2245  // Visit metadata attachments.
2246  for (const auto &I : MDs) {
2247  // Verify that the attachment is legal.
2248  switch (I.first) {
2249  default:
2250  break;
2251  case LLVMContext::MD_dbg: {
2252  ++NumDebugAttachments;
2253  AssertDI(NumDebugAttachments == 1,
2254  "function must have a single !dbg attachment", &F, I.second);
2255  AssertDI(isa<DISubprogram>(I.second),
2256  "function !dbg attachment must be a subprogram", &F, I.second);
2257  auto *SP = cast<DISubprogram>(I.second);
2258  const Function *&AttachedTo = DISubprogramAttachments[SP];
2259  AssertDI(!AttachedTo || AttachedTo == &F,
2260  "DISubprogram attached to more than one function", SP, &F);
2261  AttachedTo = &F;
2262  break;
2263  }
2264  case LLVMContext::MD_prof:
2265  ++NumProfAttachments;
2266  Assert(NumProfAttachments == 1,
2267  "function must have a single !prof attachment", &F, I.second);
2268  break;
2269  }
2270 
2271  // Verify the metadata itself.
2272  visitMDNode(*I.second);
2273  }
2274  }
2275 
2276  // If this function is actually an intrinsic, verify that it is only used in
2277  // direct call/invokes, never having its "address taken".
2278  // Only do this if the module is materialized, otherwise we don't have all the
2279  // uses.
2280  if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2281  const User *U;
2282  if (F.hasAddressTaken(&U))
2283  Assert(false, "Invalid user of intrinsic instruction!", U);
2284  }
2285 
2286  auto *N = F.getSubprogram();
2287  HasDebugInfo = (N != nullptr);
2288  if (!HasDebugInfo)
2289  return;
2290 
2291  // Check that all !dbg attachments lead to back to N (or, at least, another
2292  // subprogram that describes the same function).
2293  //
2294  // FIXME: Check this incrementally while visiting !dbg attachments.
2295  // FIXME: Only check when N is the canonical subprogram for F.
2297  for (auto &BB : F)
2298  for (auto &I : BB) {
2299  // Be careful about using DILocation here since we might be dealing with
2300  // broken code (this is the Verifier after all).
2301  DILocation *DL =
2302  dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
2303  if (!DL)
2304  continue;
2305  if (!Seen.insert(DL).second)
2306  continue;
2307 
2308  Metadata *Parent = DL->getRawScope();
2309  AssertDI(Parent && isa<DILocalScope>(Parent),
2310  "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2311  Parent);
2312  DILocalScope *Scope = DL->getInlinedAtScope();
2313  if (Scope && !Seen.insert(Scope).second)
2314  continue;
2315 
2316  DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2317 
2318  // Scope and SP could be the same MDNode and we don't want to skip
2319  // validation in that case
2320  if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2321  continue;
2322 
2323  // FIXME: Once N is canonical, check "SP == &N".
2324  AssertDI(SP->describes(&F),
2325  "!dbg attachment points at wrong subprogram for function", N, &F,
2326  &I, DL, Scope, SP);
2327  }
2328 }
2329 
2330 // verifyBasicBlock - Verify that a basic block is well formed...
2331 //
2332 void Verifier::visitBasicBlock(BasicBlock &BB) {
2333  InstsInThisBlock.clear();
2334 
2335  // Ensure that basic blocks have terminators!
2336  Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2337 
2338  // Check constraints that this basic block imposes on all of the PHI nodes in
2339  // it.
2340  if (isa<PHINode>(BB.front())) {
2343  llvm::sort(Preds);
2344  for (const PHINode &PN : BB.phis()) {
2345  // Ensure that PHI nodes have at least one entry!
2346  Assert(PN.getNumIncomingValues() != 0,
2347  "PHI nodes must have at least one entry. If the block is dead, "
2348  "the PHI should be removed!",
2349  &PN);
2350  Assert(PN.getNumIncomingValues() == Preds.size(),
2351  "PHINode should have one entry for each predecessor of its "
2352  "parent basic block!",
2353  &PN);
2354 
2355  // Get and sort all incoming values in the PHI node...
2356  Values.clear();
2357  Values.reserve(PN.getNumIncomingValues());
2358  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2359  Values.push_back(
2360  std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2361  llvm::sort(Values);
2362 
2363  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2364  // Check to make sure that if there is more than one entry for a
2365  // particular basic block in this PHI node, that the incoming values are
2366  // all identical.
2367  //
2368  Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2369  Values[i].second == Values[i - 1].second,
2370  "PHI node has multiple entries for the same basic block with "
2371  "different incoming values!",
2372  &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2373 
2374  // Check to make sure that the predecessors and PHI node entries are
2375  // matched up.
2376  Assert(Values[i].first == Preds[i],
2377  "PHI node entries do not match predecessors!", &PN,
2378  Values[i].first, Preds[i]);
2379  }
2380  }
2381  }
2382 
2383  // Check that all instructions have their parent pointers set up correctly.
2384  for (auto &I : BB)
2385  {
2386  Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2387  }
2388 }
2389 
2390 void Verifier::visitTerminator(Instruction &I) {
2391  // Ensure that terminators only exist at the end of the basic block.
2392  Assert(&I == I.getParent()->getTerminator(),
2393  "Terminator found in the middle of a basic block!", I.getParent());
2394  visitInstruction(I);
2395 }
2396 
2397 void Verifier::visitBranchInst(BranchInst &BI) {
2398  if (BI.isConditional()) {
2399  Assert(BI.getCondition()->getType()->isIntegerTy(1),
2400  "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2401  }
2402  visitTerminator(BI);
2403 }
2404 
2405 void Verifier::visitReturnInst(ReturnInst &RI) {
2406  Function *F = RI.getParent()->getParent();
2407  unsigned N = RI.getNumOperands();
2408  if (F->getReturnType()->isVoidTy())
2409  Assert(N == 0,
2410  "Found return instr that returns non-void in Function of void "
2411  "return type!",
2412  &RI, F->getReturnType());
2413  else
2414  Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2415  "Function return type does not match operand "
2416  "type of return inst!",
2417  &RI, F->getReturnType());
2418 
2419  // Check to make sure that the return value has necessary properties for
2420  // terminators...
2421  visitTerminator(RI);
2422 }
2423 
2424 void Verifier::visitSwitchInst(SwitchInst &SI) {
2425  // Check to make sure that all of the constants in the switch instruction
2426  // have the same type as the switched-on value.
2427  Type *SwitchTy = SI.getCondition()->getType();
2429  for (auto &Case : SI.cases()) {
2430  Assert(Case.getCaseValue()->getType() == SwitchTy,
2431  "Switch constants must all be same type as switch value!", &SI);
2432  Assert(Constants.insert(Case.getCaseValue()).second,
2433  "Duplicate integer as switch case", &SI, Case.getCaseValue());
2434  }
2435 
2436  visitTerminator(SI);
2437 }
2438 
2439 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2440  Assert(BI.getAddress()->getType()->isPointerTy(),
2441  "Indirectbr operand must have pointer type!", &BI);
2442  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2443  Assert(BI.getDestination(i)->getType()->isLabelTy(),
2444  "Indirectbr destinations must all have pointer type!", &BI);
2445 
2446  visitTerminator(BI);
2447 }
2448 
2449 void Verifier::visitCallBrInst(CallBrInst &CBI) {
2450  Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!",
2451  &CBI);
2452  Assert(CBI.getType()->isVoidTy(), "Callbr return value is not supported!",
2453  &CBI);
2454  for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
2455  Assert(CBI.getSuccessor(i)->getType()->isLabelTy(),
2456  "Callbr successors must all have pointer type!", &CBI);
2457  for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
2458  Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)),
2459  "Using an unescaped label as a callbr argument!", &CBI);
2460  if (isa<BasicBlock>(CBI.getOperand(i)))
2461  for (unsigned j = i + 1; j != e; ++j)
2462  Assert(CBI.getOperand(i) != CBI.getOperand(j),
2463  "Duplicate callbr destination!", &CBI);
2464  }
2465 
2466  visitTerminator(CBI);
2467 }
2468 
2469 void Verifier::visitSelectInst(SelectInst &SI) {
2471  SI.getOperand(2)),
2472  "Invalid operands for select instruction!", &SI);
2473 
2474  Assert(SI.getTrueValue()->getType() == SI.getType(),
2475  "Select values must have same type as select instruction!", &SI);
2476  visitInstruction(SI);
2477 }
2478 
2479 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
2480 /// a pass, if any exist, it's an error.
2481 ///
2482 void Verifier::visitUserOp1(Instruction &I) {
2483  Assert(false, "User-defined operators should not live outside of a pass!", &I);
2484 }
2485 
2486 void Verifier::visitTruncInst(TruncInst &I) {
2487  // Get the source and destination types
2488  Type *SrcTy = I.getOperand(0)->getType();
2489  Type *DestTy = I.getType();
2490 
2491  // Get the size of the types in bits, we'll need this later
2492  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2493  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2494 
2495  Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2496  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2497  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2498  "trunc source and destination must both be a vector or neither", &I);
2499  Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2500 
2501  visitInstruction(I);
2502 }
2503 
2504 void Verifier::visitZExtInst(ZExtInst &I) {
2505  // Get the source and destination types
2506  Type *SrcTy = I.getOperand(0)->getType();
2507  Type *DestTy = I.getType();
2508 
2509  // Get the size of the types in bits, we'll need this later
2510  Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2511  Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2512  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2513  "zext source and destination must both be a vector or neither", &I);
2514  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2515  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2516 
2517  Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2518 
2519  visitInstruction(I);
2520 }
2521 
2522 void Verifier::visitSExtInst(SExtInst &I) {
2523  // Get the source and destination types
2524  Type *SrcTy = I.getOperand(0)->getType();
2525  Type *DestTy = I.getType();
2526 
2527  // Get the size of the types in bits, we'll need this later
2528  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2529  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2530 
2531  Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2532  Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2533  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2534  "sext source and destination must both be a vector or neither", &I);
2535  Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2536 
2537  visitInstruction(I);
2538 }
2539 
2540 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2541  // Get the source and destination types
2542  Type *SrcTy = I.getOperand(0)->getType();
2543  Type *DestTy = I.getType();
2544  // Get the size of the types in bits, we'll need this later
2545  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2546  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2547 
2548  Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2549  Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2550  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2551  "fptrunc source and destination must both be a vector or neither", &I);
2552  Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2553 
2554  visitInstruction(I);
2555 }
2556 
2557 void Verifier::visitFPExtInst(FPExtInst &I) {
2558  // Get the source and destination types
2559  Type *SrcTy = I.getOperand(0)->getType();
2560  Type *DestTy = I.getType();
2561 
2562  // Get the size of the types in bits, we'll need this later
2563  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2564  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2565 
2566  Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2567  Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2568  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2569  "fpext source and destination must both be a vector or neither", &I);
2570  Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2571 
2572  visitInstruction(I);
2573 }
2574 
2575 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2576  // Get the source and destination types
2577  Type *SrcTy = I.getOperand(0)->getType();
2578  Type *DestTy = I.getType();
2579 
2580  bool SrcVec = SrcTy->isVectorTy();
2581  bool DstVec = DestTy->isVectorTy();
2582 
2583  Assert(SrcVec == DstVec,
2584  "UIToFP source and dest must both be vector or scalar", &I);
2585  Assert(SrcTy->isIntOrIntVectorTy(),
2586  "UIToFP source must be integer or integer vector", &I);
2587  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2588  &I);
2589 
2590  if (SrcVec && DstVec)
2591  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2592  cast<VectorType>(DestTy)->getNumElements(),
2593  "UIToFP source and dest vector length mismatch", &I);
2594 
2595  visitInstruction(I);
2596 }
2597 
2598 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2599  // Get the source and destination types
2600  Type *SrcTy = I.getOperand(0)->getType();
2601  Type *DestTy = I.getType();
2602 
2603  bool SrcVec = SrcTy->isVectorTy();
2604  bool DstVec = DestTy->isVectorTy();
2605 
2606  Assert(SrcVec == DstVec,
2607  "SIToFP source and dest must both be vector or scalar", &I);
2608  Assert(SrcTy->isIntOrIntVectorTy(),
2609  "SIToFP source must be integer or integer vector", &I);
2610  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2611  &I);
2612 
2613  if (SrcVec && DstVec)
2614  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2615  cast<VectorType>(DestTy)->getNumElements(),
2616  "SIToFP source and dest vector length mismatch", &I);
2617 
2618  visitInstruction(I);
2619 }
2620 
2621 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2622  // Get the source and destination types
2623  Type *SrcTy = I.getOperand(0)->getType();
2624  Type *DestTy = I.getType();
2625 
2626  bool SrcVec = SrcTy->isVectorTy();
2627  bool DstVec = DestTy->isVectorTy();
2628 
2629  Assert(SrcVec == DstVec,
2630  "FPToUI source and dest must both be vector or scalar", &I);
2631  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2632  &I);
2633  Assert(DestTy->isIntOrIntVectorTy(),
2634  "FPToUI result must be integer or integer vector", &I);
2635 
2636  if (SrcVec && DstVec)
2637  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2638  cast<VectorType>(DestTy)->getNumElements(),
2639  "FPToUI source and dest vector length mismatch", &I);
2640 
2641  visitInstruction(I);
2642 }
2643 
2644 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2645  // Get the source and destination types
2646  Type *SrcTy = I.getOperand(0)->getType();
2647  Type *DestTy = I.getType();
2648 
2649  bool SrcVec = SrcTy->isVectorTy();
2650  bool DstVec = DestTy->isVectorTy();
2651 
2652  Assert(SrcVec == DstVec,
2653  "FPToSI source and dest must both be vector or scalar", &I);
2654  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2655  &I);
2656  Assert(DestTy->isIntOrIntVectorTy(),
2657  "FPToSI result must be integer or integer vector", &I);
2658 
2659  if (SrcVec && DstVec)
2660  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2661  cast<VectorType>(DestTy)->getNumElements(),
2662  "FPToSI source and dest vector length mismatch", &I);
2663 
2664  visitInstruction(I);
2665 }
2666 
2667 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2668  // Get the source and destination types
2669  Type *SrcTy = I.getOperand(0)->getType();
2670  Type *DestTy = I.getType();
2671 
2672  Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2673 
2674  if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2676  "ptrtoint not supported for non-integral pointers");
2677 
2678  Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2679  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2680  &I);
2681 
2682  if (SrcTy->isVectorTy()) {
2683  VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2684  VectorType *VDest = dyn_cast<VectorType>(DestTy);
2685  Assert(VSrc->getNumElements() == VDest->getNumElements(),
2686  "PtrToInt Vector width mismatch", &I);
2687  }
2688 
2689  visitInstruction(I);
2690 }
2691 
2692 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2693  // Get the source and destination types
2694  Type *SrcTy = I.getOperand(0)->getType();
2695  Type *DestTy = I.getType();
2696 
2697  Assert(SrcTy->isIntOrIntVectorTy(),
2698  "IntToPtr source must be an integral", &I);
2699  Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2700 
2701  if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2703  "inttoptr not supported for non-integral pointers");
2704 
2705  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2706  &I);
2707  if (SrcTy->isVectorTy()) {
2708  VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2709  VectorType *VDest = dyn_cast<VectorType>(DestTy);
2710  Assert(VSrc->getNumElements() == VDest->getNumElements(),
2711  "IntToPtr Vector width mismatch", &I);
2712  }
2713  visitInstruction(I);
2714 }
2715 
2716 void Verifier::visitBitCastInst(BitCastInst &I) {
2717  Assert(
2718  CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2719  "Invalid bitcast", &I);
2720  visitInstruction(I);
2721 }
2722 
2723 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2724  Type *SrcTy = I.getOperand(0)->getType();
2725  Type *DestTy = I.getType();
2726 
2727  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2728  &I);
2729  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2730  &I);
2732  "AddrSpaceCast must be between different address spaces", &I);
2733  if (SrcTy->isVectorTy())
2734  Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2735  "AddrSpaceCast vector pointer number of elements mismatch", &I);
2736  visitInstruction(I);
2737 }
2738 
2739 /// visitPHINode - Ensure that a PHI node is well formed.
2740 ///
2741 void Verifier::visitPHINode(PHINode &PN) {
2742  // Ensure that the PHI nodes are all grouped together at the top of the block.
2743  // This can be tested by checking whether the instruction before this is
2744  // either nonexistent (because this is begin()) or is a PHI node. If not,
2745  // then there is some other instruction before a PHI.
2746  Assert(&PN == &PN.getParent()->front() ||
2747  isa<PHINode>(--BasicBlock::iterator(&PN)),
2748  "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2749 
2750  // Check that a PHI doesn't yield a Token.
2751  Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2752 
2753  // Check that all of the values of the PHI node have the same type as the
2754  // result, and that the incoming blocks are really basic blocks.
2755  for (Value *IncValue : PN.incoming_values()) {
2756  Assert(PN.getType() == IncValue->getType(),
2757  "PHI node operands are not the same type as the result!", &PN);
2758  }
2759 
2760  // All other PHI node constraints are checked in the visitBasicBlock method.
2761 
2762  visitInstruction(PN);
2763 }
2764 
2765 void Verifier::visitCallBase(CallBase &Call) {
2767  "Called function must be a pointer!", Call);
2768  PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
2769 
2770  Assert(FPTy->getElementType()->isFunctionTy(),
2771  "Called function is not pointer to function type!", Call);
2772 
2773  Assert(FPTy->getElementType() == Call.getFunctionType(),
2774  "Called function is not the same type as the call!", Call);
2775 
2776  FunctionType *FTy = Call.getFunctionType();
2777 
2778  // Verify that the correct number of arguments are being passed
2779  if (FTy->isVarArg())
2780  Assert(Call.arg_size() >= FTy->getNumParams(),
2781  "Called function requires more parameters than were provided!",
2782  Call);
2783  else
2784  Assert(Call.arg_size() == FTy->getNumParams(),
2785  "Incorrect number of arguments passed to called function!", Call);
2786 
2787  // Verify that all arguments to the call match the function type.
2788  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2789  Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
2790  "Call parameter type does not match function signature!",
2791  Call.getArgOperand(i), FTy->getParamType(i), Call);
2792 
2793  AttributeList Attrs = Call.getAttributes();
2794 
2795  Assert(verifyAttributeCount(Attrs, Call.arg_size()),
2796  "Attribute after last parameter!", Call);
2797 
2798  bool IsIntrinsic = Call.getCalledFunction() &&
2799  Call.getCalledFunction()->getName().startswith("llvm.");
2800 
2801  Function *Callee
2803 
2804  if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2805  // Don't allow speculatable on call sites, unless the underlying function
2806  // declaration is also speculatable.
2807  Assert(Callee && Callee->isSpeculatable(),
2808  "speculatable attribute may not apply to call sites", Call);
2809  }
2810 
2811  // Verify call attributes.
2812  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic);
2813 
2814  // Conservatively check the inalloca argument.
2815  // We have a bug if we can find that there is an underlying alloca without
2816  // inalloca.
2817  if (Call.hasInAllocaArgument()) {
2818  Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
2819  if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2820  Assert(AI->isUsedWithInAlloca(),
2821  "inalloca argument for call has mismatched alloca", AI, Call);
2822  }
2823 
2824  // For each argument of the callsite, if it has the swifterror argument,
2825  // make sure the underlying alloca/parameter it comes from has a swifterror as
2826  // well.
2827  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
2828  if (Call.paramHasAttr(i, Attribute::SwiftError)) {
2829  Value *SwiftErrorArg = Call.getArgOperand(i);
2830  if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2831  Assert(AI->isSwiftError(),
2832  "swifterror argument for call has mismatched alloca", AI, Call);
2833  continue;
2834  }
2835  auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2836  Assert(ArgI,
2837  "swifterror argument should come from an alloca or parameter",
2838  SwiftErrorArg, Call);
2839  Assert(ArgI->hasSwiftErrorAttr(),
2840  "swifterror argument for call has mismatched parameter", ArgI,
2841  Call);
2842  }
2843 
2844  if (Attrs.hasParamAttribute(i, Attribute::ImmArg)) {
2845  // Don't allow immarg on call sites, unless the underlying declaration
2846  // also has the matching immarg.
2847  Assert(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
2848  "immarg may not apply only to call sites",
2849  Call.getArgOperand(i), Call);
2850  }
2851 
2852  if (Call.paramHasAttr(i, Attribute::ImmArg)) {
2853  Value *ArgVal = Call.getArgOperand(i);
2854  Assert(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
2855  "immarg operand has non-immediate parameter", ArgVal, Call);
2856  }
2857  }
2858 
2859  if (FTy->isVarArg()) {
2860  // FIXME? is 'nest' even legal here?
2861  bool SawNest = false;
2862  bool SawReturned = false;
2863 
2864  for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2865  if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2866  SawNest = true;
2867  if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2868  SawReturned = true;
2869  }
2870 
2871  // Check attributes on the varargs part.
2872  for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
2873  Type *Ty = Call.getArgOperand(Idx)->getType();
2874  AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2875  verifyParameterAttrs(ArgAttrs, Ty, &Call);
2876 
2877  if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2878  Assert(!SawNest, "More than one parameter has attribute nest!", Call);
2879  SawNest = true;
2880  }
2881 
2882  if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2883  Assert(!SawReturned, "More than one parameter has attribute returned!",
2884  Call);
2885  Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2886  "Incompatible argument and return types for 'returned' "
2887  "attribute",
2888  Call);
2889  SawReturned = true;
2890  }
2891 
2892  // Statepoint intrinsic is vararg but the wrapped function may be not.
2893  // Allow sret here and check the wrapped function in verifyStatepoint.
2894  if (!Call.getCalledFunction() ||
2895  Call.getCalledFunction()->getIntrinsicID() !=
2896  Intrinsic::experimental_gc_statepoint)
2897  Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2898  "Attribute 'sret' cannot be used for vararg call arguments!",
2899  Call);
2900 
2901  if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2902  Assert(Idx == Call.arg_size() - 1,
2903  "inalloca isn't on the last argument!", Call);
2904  }
2905  }
2906 
2907  // Verify that there's no metadata unless it's a direct call to an intrinsic.
2908  if (!IsIntrinsic) {
2909  for (Type *ParamTy : FTy->params()) {
2910  Assert(!ParamTy->isMetadataTy(),
2911  "Function has metadata parameter but isn't an intrinsic", Call);
2912  Assert(!ParamTy->isTokenTy(),
2913  "Function has token parameter but isn't an intrinsic", Call);
2914  }
2915  }
2916 
2917  // Verify that indirect calls don't return tokens.
2918  if (!Call.getCalledFunction())
2919  Assert(!FTy->getReturnType()->isTokenTy(),
2920  "Return type cannot be token for indirect call!");
2921 
2922  if (Function *F = Call.getCalledFunction())
2924  visitIntrinsicCall(ID, Call);
2925 
2926  // Verify that a callsite has at most one "deopt", at most one "funclet" and
2927  // at most one "gc-transition" operand bundle.
2928  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
2929  FoundGCTransitionBundle = false;
2930  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
2931  OperandBundleUse BU = Call.getOperandBundleAt(i);
2932  uint32_t Tag = BU.getTagID();
2933  if (Tag == LLVMContext::OB_deopt) {
2934  Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
2935  FoundDeoptBundle = true;
2936  } else if (Tag == LLVMContext::OB_gc_transition) {
2937  Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
2938  Call);
2939  FoundGCTransitionBundle = true;
2940  } else if (Tag == LLVMContext::OB_funclet) {
2941  Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
2942  FoundFuncletBundle = true;
2943  Assert(BU.Inputs.size() == 1,
2944  "Expected exactly one funclet bundle operand", Call);
2945  Assert(isa<FuncletPadInst>(BU.Inputs.front()),
2946  "Funclet bundle operands should correspond to a FuncletPadInst",
2947  Call);
2948  }
2949  }
2950 
2951  // Verify that each inlinable callsite of a debug-info-bearing function in a
2952  // debug-info-bearing function has a debug location attached to it. Failure to
2953  // do so causes assertion failures when the inliner sets up inline scope info.
2954  if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
2955  Call.getCalledFunction()->getSubprogram())
2956  AssertDI(Call.getDebugLoc(),
2957  "inlinable function call in a function with "
2958  "debug info must have a !dbg location",
2959  Call);
2960 
2961  visitInstruction(Call);
2962 }
2963 
2964 /// Two types are "congruent" if they are identical, or if they are both pointer
2965 /// types with different pointee types and the same address space.
2966 static bool isTypeCongruent(Type *L, Type *R) {
2967  if (L == R)
2968  return true;
2971  if (!PL || !PR)
2972  return false;
2973  return PL->getAddressSpace() == PR->getAddressSpace();
2974 }
2975 
2977  static const Attribute::AttrKind ABIAttrs[] = {
2978  Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
2979  Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
2980  Attribute::SwiftError};
2981  AttrBuilder Copy;
2982  for (auto AK : ABIAttrs) {
2983  if (Attrs.hasParamAttribute(I, AK))
2984  Copy.addAttribute(AK);
2985  }
2986  if (Attrs.hasParamAttribute(I, Attribute::Alignment))
2987  Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
2988  return Copy;
2989 }
2990 
2991 void Verifier::verifyMustTailCall(CallInst &CI) {
2992  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
2993 
2994  // - The caller and callee prototypes must match. Pointer types of
2995  // parameters or return types may differ in pointee type, but not
2996  // address space.
2997  Function *F = CI.getParent()->getParent();
2998  FunctionType *CallerTy = F->getFunctionType();
2999  FunctionType *CalleeTy = CI.getFunctionType();
3000  if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3001  Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3002  "cannot guarantee tail call due to mismatched parameter counts",
3003  &CI);
3004  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3005  Assert(
3006  isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3007  "cannot guarantee tail call due to mismatched parameter types", &CI);
3008  }
3009  }
3010  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3011  "cannot guarantee tail call due to mismatched varargs", &CI);
3012  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3013  "cannot guarantee tail call due to mismatched return types", &CI);
3014 
3015  // - The calling conventions of the caller and callee must match.
3016  Assert(F->getCallingConv() == CI.getCallingConv(),
3017  "cannot guarantee tail call due to mismatched calling conv", &CI);
3018 
3019  // - All ABI-impacting function attributes, such as sret, byval, inreg,
3020  // returned, and inalloca, must match.
3021  AttributeList CallerAttrs = F->getAttributes();
3022  AttributeList CalleeAttrs = CI.getAttributes();
3023  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3024  AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
3025  AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
3026  Assert(CallerABIAttrs == CalleeABIAttrs,
3027  "cannot guarantee tail call due to mismatched ABI impacting "
3028  "function attributes",
3029  &CI, CI.getOperand(I));
3030  }
3031 
3032  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3033  // or a pointer bitcast followed by a ret instruction.
3034  // - The ret instruction must return the (possibly bitcasted) value
3035  // produced by the call or void.
3036  Value *RetVal = &CI;
3037  Instruction *Next = CI.getNextNode();
3038 
3039  // Handle the optional bitcast.
3040  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3041  Assert(BI->getOperand(0) == RetVal,
3042  "bitcast following musttail call must use the call", BI);
3043  RetVal = BI;
3044  Next = BI->getNextNode();
3045  }
3046 
3047  // Check the return.
3048  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3049  Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3050  &CI);
3051  Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3052  "musttail call result must be returned", Ret);
3053 }
3054 
3055 void Verifier::visitCallInst(CallInst &CI) {
3056  visitCallBase(CI);
3057 
3058  if (CI.isMustTailCall())
3059  verifyMustTailCall(CI);
3060 }
3061 
3062 void Verifier::visitInvokeInst(InvokeInst &II) {
3063  visitCallBase(II);
3064 
3065  // Verify that the first non-PHI instruction of the unwind destination is an
3066  // exception handling instruction.
3067  Assert(
3068  II.getUnwindDest()->isEHPad(),
3069  "The unwind destination does not have an exception handling instruction!",
3070  &II);
3071 
3072  visitTerminator(II);
3073 }
3074 
3075 /// visitUnaryOperator - Check the argument to the unary operator.
3076 ///
3077 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3078  Assert(U.getType() == U.getOperand(0)->getType(),
3079  "Unary operators must have same type for"
3080  "operands and result!",
3081  &U);
3082 
3083  switch (U.getOpcode()) {
3084  // Check that floating-point arithmetic operators are only used with
3085  // floating-point operands.
3086  case Instruction::FNeg:
3088  "FNeg operator only works with float types!", &U);
3089  break;
3090  default:
3091  llvm_unreachable("Unknown UnaryOperator opcode!");
3092  }
3093 
3094  visitInstruction(U);
3095 }
3096 
3097 /// visitBinaryOperator - Check that both arguments to the binary operator are
3098 /// of the same type!
3099 ///
3100 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3101  Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3102  "Both operands to a binary operator are not of the same type!", &B);
3103 
3104  switch (B.getOpcode()) {
3105  // Check that integer arithmetic operators are only used with
3106  // integral operands.
3107  case Instruction::Add:
3108  case Instruction::Sub:
3109  case Instruction::Mul:
3110  case Instruction::SDiv:
3111  case Instruction::UDiv:
3112  case Instruction::SRem:
3113  case Instruction::URem:
3115  "Integer arithmetic operators only work with integral types!", &B);
3116  Assert(B.getType() == B.getOperand(0)->getType(),
3117  "Integer arithmetic operators must have same type "
3118  "for operands and result!",
3119  &B);
3120  break;
3121  // Check that floating-point arithmetic operators are only used with
3122  // floating-point operands.
3123  case Instruction::FAdd:
3124  case Instruction::FSub:
3125  case Instruction::FMul:
3126  case Instruction::FDiv:
3127  case Instruction::FRem:
3129  "Floating-point arithmetic operators only work with "
3130  "floating-point types!",
3131  &B);
3132  Assert(B.getType() == B.getOperand(0)->getType(),
3133  "Floating-point arithmetic operators must have same type "
3134  "for operands and result!",
3135  &B);
3136  break;
3137  // Check that logical operators are only used with integral operands.
3138  case Instruction::And:
3139  case Instruction::Or:
3140  case Instruction::Xor:
3142  "Logical operators only work with integral types!", &B);
3143  Assert(B.getType() == B.getOperand(0)->getType(),
3144  "Logical operators must have same type for operands and result!",
3145  &B);
3146  break;
3147  case Instruction::Shl:
3148  case Instruction::LShr:
3149  case Instruction::AShr:
3151  "Shifts only work with integral types!", &B);
3152  Assert(B.getType() == B.getOperand(0)->getType(),
3153  "Shift return type must be same as operands!", &B);
3154  break;
3155  default:
3156  llvm_unreachable("Unknown BinaryOperator opcode!");
3157  }
3158 
3159  visitInstruction(B);
3160 }
3161 
3162 void Verifier::visitICmpInst(ICmpInst &IC) {
3163  // Check that the operands are the same type
3164  Type *Op0Ty = IC.getOperand(0)->getType();
3165  Type *Op1Ty = IC.getOperand(1)->getType();
3166  Assert(Op0Ty == Op1Ty,
3167  "Both operands to ICmp instruction are not of the same type!", &IC);
3168  // Check that the operands are the right type
3169  Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3170  "Invalid operand types for ICmp instruction", &IC);
3171  // Check that the predicate is valid.
3172  Assert(IC.isIntPredicate(),
3173  "Invalid predicate in ICmp instruction!", &IC);
3174 
3175  visitInstruction(IC);
3176 }
3177 
3178 void Verifier::visitFCmpInst(FCmpInst &FC) {
3179  // Check that the operands are the same type
3180  Type *Op0Ty = FC.getOperand(0)->getType();
3181  Type *Op1Ty = FC.getOperand(1)->getType();
3182  Assert(Op0Ty == Op1Ty,
3183  "Both operands to FCmp instruction are not of the same type!", &FC);
3184  // Check that the operands are the right type
3185  Assert(Op0Ty->isFPOrFPVectorTy(),
3186  "Invalid operand types for FCmp instruction", &FC);
3187  // Check that the predicate is valid.
3188  Assert(FC.isFPPredicate(),
3189  "Invalid predicate in FCmp instruction!", &FC);
3190 
3191  visitInstruction(FC);
3192 }
3193 
3194 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3195  Assert(
3197  "Invalid extractelement operands!", &EI);
3198  visitInstruction(EI);
3199 }
3200 
3201 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3203  IE.getOperand(2)),
3204  "Invalid insertelement operands!", &IE);
3205  visitInstruction(IE);
3206 }
3207 
3208 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3210  SV.getOperand(2)),
3211  "Invalid shufflevector operands!", &SV);
3212  visitInstruction(SV);
3213 }
3214 
3215 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3216  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3217 
3218  Assert(isa<PointerType>(TargetTy),
3219  "GEP base pointer is not a vector or a vector of pointers", &GEP);
3220  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3221 
3222  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3223  Assert(all_of(
3224  Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3225  "GEP indexes must be integers", &GEP);
3226  Type *ElTy =
3228  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3229 
3230  Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3231  GEP.getResultElementType() == ElTy,
3232  "GEP is not of right type for indices!", &GEP, ElTy);
3233 
3234  if (GEP.getType()->isVectorTy()) {
3235  // Additional checks for vector GEPs.
3236  unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3237  if (GEP.getPointerOperandType()->isVectorTy())
3238  Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3239  "Vector GEP result width doesn't match operand's", &GEP);
3240  for (Value *Idx : Idxs) {
3241  Type *IndexTy = Idx->getType();
3242  if (IndexTy->isVectorTy()) {
3243  unsigned IndexWidth = IndexTy->getVectorNumElements();
3244  Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3245  }
3246  Assert(IndexTy->isIntOrIntVectorTy(),
3247  "All GEP indices should be of integer type");
3248  }
3249  }
3250 
3251  if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3252  Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3253  "GEP address space doesn't match type", &GEP);
3254  }
3255 
3256  visitInstruction(GEP);
3257 }
3258 
3259 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3260  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3261 }
3262 
3263 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3264  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3265  "precondition violation");
3266 
3267  unsigned NumOperands = Range->getNumOperands();
3268  Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3269  unsigned NumRanges = NumOperands / 2;
3270  Assert(NumRanges >= 1, "It should have at least one range!", Range);
3271 
3272  ConstantRange LastRange(1, true); // Dummy initial value
3273  for (unsigned i = 0; i < NumRanges; ++i) {
3274  ConstantInt *Low =
3275  mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3276  Assert(Low, "The lower limit must be an integer!", Low);
3277  ConstantInt *High =
3278  mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3279  Assert(High, "The upper limit must be an integer!", High);
3280  Assert(High->getType() == Low->getType() && High->getType() == Ty,
3281  "Range types must match instruction type!", &I);
3282 
3283  APInt HighV = High->getValue();
3284  APInt LowV = Low->getValue();
3285  ConstantRange CurRange(LowV, HighV);
3286  Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3287  "Range must not be empty!", Range);
3288  if (i != 0) {
3289  Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3290  "Intervals are overlapping", Range);
3291  Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3292  Range);
3293  Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3294  Range);
3295  }
3296  LastRange = ConstantRange(LowV, HighV);
3297  }
3298  if (NumRanges > 2) {
3299  APInt FirstLow =
3300  mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3301  APInt FirstHigh =
3302  mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3303  ConstantRange FirstRange(FirstLow, FirstHigh);
3304  Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3305  "Intervals are overlapping", Range);
3306  Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3307  Range);
3308  }
3309 }
3310 
3311 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3312  unsigned Size = DL.getTypeSizeInBits(Ty);
3313  Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3314  Assert(!(Size & (Size - 1)),
3315  "atomic memory access' operand must have a power-of-two size", Ty, I);
3316 }
3317 
3318 void Verifier::visitLoadInst(LoadInst &LI) {
3320  Assert(PTy, "Load operand must be a pointer.", &LI);
3321  Type *ElTy = LI.getType();
3323  "huge alignment values are unsupported", &LI);
3324  Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3325  if (LI.isAtomic()) {
3328  "Load cannot have Release ordering", &LI);
3329  Assert(LI.getAlignment() != 0,
3330  "Atomic load must specify explicit alignment", &LI);
3331  Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3332  "atomic load operand must have integer, pointer, or floating point "
3333  "type!",
3334  ElTy, &LI);
3335  checkAtomicMemAccessSize(ElTy, &LI);
3336  } else {
3338  "Non-atomic load cannot have SynchronizationScope specified", &LI);
3339  }
3340 
3341  visitInstruction(LI);
3342 }
3343 
3344 void Verifier::visitStoreInst(StoreInst &SI) {
3346  Assert(PTy, "Store operand must be a pointer.", &SI);
3347  Type *ElTy = PTy->getElementType();
3348  Assert(ElTy == SI.getOperand(0)->getType(),
3349  "Stored value type does not match pointer operand type!", &SI, ElTy);
3351  "huge alignment values are unsupported", &SI);
3352  Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3353  if (SI.isAtomic()) {
3356  "Store cannot have Acquire ordering", &SI);
3357  Assert(SI.getAlignment() != 0,
3358  "Atomic store must specify explicit alignment", &SI);
3359  Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3360  "atomic store operand must have integer, pointer, or floating point "
3361  "type!",
3362  ElTy, &SI);
3363  checkAtomicMemAccessSize(ElTy, &SI);
3364  } else {
3366  "Non-atomic store cannot have SynchronizationScope specified", &SI);
3367  }
3368  visitInstruction(SI);
3369 }
3370 
3371 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
3372 void Verifier::verifySwiftErrorCall(CallBase &Call,
3373  const Value *SwiftErrorVal) {
3374  unsigned Idx = 0;
3375  for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
3376  if (*I == SwiftErrorVal) {
3377  Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
3378  "swifterror value when used in a callsite should be marked "
3379  "with swifterror attribute",
3380  SwiftErrorVal, Call);
3381  }
3382  }
3383 }
3384 
3385 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3386  // Check that swifterror value is only used by loads, stores, or as
3387  // a swifterror argument.
3388  for (const User *U : SwiftErrorVal->users()) {
3389  Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3390  isa<InvokeInst>(U),
3391  "swifterror value can only be loaded and stored from, or "
3392  "as a swifterror argument!",
3393  SwiftErrorVal, U);
3394  // If it is used by a store, check it is the second operand.
3395  if (auto StoreI = dyn_cast<StoreInst>(U))
3396  Assert(StoreI->getOperand(1) == SwiftErrorVal,
3397  "swifterror value should be the second operand when used "
3398  "by stores", SwiftErrorVal, U);
3399  if (auto *Call = dyn_cast<CallBase>(U))
3400  verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3401  }
3402 }
3403 
3404 void Verifier::visitAllocaInst(AllocaInst &AI) {
3405  SmallPtrSet<Type*, 4> Visited;
3406  PointerType *PTy = AI.getType();
3407  // TODO: Relax this restriction?
3409  "Allocation instruction pointer not in the stack address space!",
3410  &AI);
3411  Assert(AI.getAllocatedType()->isSized(&Visited),
3412  "Cannot allocate unsized type", &AI);
3414  "Alloca array size must have integer type", &AI);
3416  "huge alignment values are unsupported", &AI);
3417 
3418  if (AI.isSwiftError()) {
3419  verifySwiftErrorValue(&AI);
3420  }
3421 
3422  visitInstruction(AI);
3423 }
3424 
3425 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3426 
3427  // FIXME: more conditions???
3429  "cmpxchg instructions must be atomic.", &CXI);
3431  "cmpxchg instructions must be atomic.", &CXI);
3433  "cmpxchg instructions cannot be unordered.", &CXI);
3435  "cmpxchg instructions cannot be unordered.", &CXI);
3437  "cmpxchg instructions failure argument shall be no stronger than the "
3438  "success argument",
3439  &CXI);
3442  "cmpxchg failure ordering cannot include release semantics", &CXI);
3443 
3444  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3445  Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3446  Type *ElTy = PTy->getElementType();
3447  Assert(ElTy->isIntOrPtrTy(),
3448  "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3449  checkAtomicMemAccessSize(ElTy, &CXI);
3450  Assert(ElTy == CXI.getOperand(1)->getType(),
3451  "Expected value type does not match pointer operand type!", &CXI,
3452  ElTy);
3453  Assert(ElTy == CXI.getOperand(2)->getType(),
3454  "Stored value type does not match pointer operand type!", &CXI, ElTy);
3455  visitInstruction(CXI);
3456 }
3457 
3458 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3460  "atomicrmw instructions must be atomic.", &RMWI);
3462  "atomicrmw instructions cannot be unordered.", &RMWI);
3463  auto Op = RMWI.getOperation();
3464  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3465  Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3466  Type *ElTy = PTy->getElementType();
3467  if (Op == AtomicRMWInst::Xchg) {
3468  Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " +
3470  " operand must have integer or floating point type!",
3471  &RMWI, ElTy);
3472  } else if (AtomicRMWInst::isFPOperation(Op)) {
3473  Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
3475  " operand must have floating point type!",
3476  &RMWI, ElTy);
3477  } else {
3478  Assert(ElTy->isIntegerTy(), "atomicrmw " +
3480  " operand must have integer type!",
3481  &RMWI, ElTy);
3482  }
3483  checkAtomicMemAccessSize(ElTy, &RMWI);
3484  Assert(ElTy == RMWI.getOperand(1)->getType(),
3485  "Argument value type does not match pointer operand type!", &RMWI,
3486  ElTy);
3488  "Invalid binary operation!", &RMWI);
3489  visitInstruction(RMWI);
3490 }
3491 
3492 void Verifier::visitFenceInst(FenceInst &FI) {
3493  const AtomicOrdering Ordering = FI.getOrdering();
3494  Assert(Ordering == AtomicOrdering::Acquire ||
3495  Ordering == AtomicOrdering::Release ||
3496  Ordering == AtomicOrdering::AcquireRelease ||
3498  "fence instructions may only have acquire, release, acq_rel, or "
3499  "seq_cst ordering.",
3500  &FI);
3501  visitInstruction(FI);
3502 }
3503 
3504 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3506  EVI.getIndices()) == EVI.getType(),
3507  "Invalid ExtractValueInst operands!", &EVI);
3508 
3509  visitInstruction(EVI);
3510 }
3511 
3512 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3514  IVI.getIndices()) ==
3515  IVI.getOperand(1)->getType(),
3516  "Invalid InsertValueInst operands!", &IVI);
3517 
3518  visitInstruction(IVI);
3519 }
3520 
3521 static Value *getParentPad(Value *EHPad) {
3522  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3523  return FPI->getParentPad();
3524 
3525  return cast<CatchSwitchInst>(EHPad)->getParentPad();
3526 }
3527 
// Verify that every predecessor of the block containing the EH pad
// instruction I reaches it legally: landingpads only via invoke unwind edges,
// catchpads only from their owning catchswitch, and funclet pads /
// catchswitches only via unwind edges whose funclet nesting is consistent.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  // The entry block has no predecessors, so nothing could unwind to it.
  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
             "Block containing LandingPadInst must be jumped to "
             "only by the unwind edge of an invoke.",
             LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad may only be entered from its catchswitch, and that
    // catchswitch must not also name this block as its unwind destination.
    if (!pred_empty(BB))
      Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
             "Block containg CatchPadInst must be jumped to "
             "only by its catchswitch.",
             CPI);
    Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
           "Catchswitch cannot unwind to one of its catchpads",
           CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
             "EH pad must be jumped to via an unwind edge", ToPad, II);
      // The invoke's funclet bundle (if any) identifies the pad it unwinds
      // out of; without one it unwinds from the function itself (none token).
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    // Walk up the parent-pad chain from FromPad until we reach ToPad's
    // parent (legal) or detect self-handling / a cycle (illegal).
    SmallSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Assert(FromPad != ToPad,
             "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Assert(!isa<ConstantTokenNone>(FromPad),
             "A single unwind edge may only enter one EH pad", TI);
      Assert(Seen.insert(FromPad).second,
             "EH pad jumps through a cycle of pads", FromPad);
    }
  }
}
3600 
3601 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3602  // The landingpad instruction is ill-formed if it doesn't have any clauses and
3603  // isn't a cleanup.
3604  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3605  "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3606 
3607  visitEHPadPredecessors(LPI);
3608 
3609  if (!LandingPadResultTy)
3610  LandingPadResultTy = LPI.getType();
3611  else
3612  Assert(LandingPadResultTy == LPI.getType(),
3613  "The landingpad instruction should have a consistent result type "
3614  "inside a function.",
3615  &LPI);
3616 
3617  Function *F = LPI.getParent()->getParent();
3618  Assert(F->hasPersonalityFn(),
3619  "LandingPadInst needs to be in a function with a personality.", &LPI);
3620 
3621  // The landingpad instruction must be the first non-PHI instruction in the
3622  // block.
3623  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3624  "LandingPadInst not the first non-PHI instruction in the block.",
3625  &LPI);
3626 
3627  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3628  Constant *Clause = LPI.getClause(i);
3629  if (LPI.isCatch(i)) {
3630  Assert(isa<PointerType>(Clause->getType()),
3631  "Catch operand does not have pointer type!", &LPI);
3632  } else {
3633  Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3634  Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3635  "Filter operand is not an array of constants!", &LPI);
3636  }
3637  }
3638 
3639  visitInstruction(LPI);
3640 }
3641 
3642 void Verifier::visitResumeInst(ResumeInst &RI) {
3644  "ResumeInst needs to be in a function with a personality.", &RI);
3645 
3646  if (!LandingPadResultTy)
3647  LandingPadResultTy = RI.getValue()->getType();
3648  else
3649  Assert(LandingPadResultTy == RI.getValue()->getType(),
3650  "The resume instruction should have a consistent result type "
3651  "inside a function.",
3652  &RI);
3653 
3654  visitTerminator(RI);
3655 }
3656 
3657 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3658  BasicBlock *BB = CPI.getParent();
3659 
3660  Function *F = BB->getParent();
3661  Assert(F->hasPersonalityFn(),
3662  "CatchPadInst needs to be in a function with a personality.", &CPI);
3663 
3664  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3665  "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3666  CPI.getParentPad());
3667 
3668  // The catchpad instruction must be the first non-PHI instruction in the
3669  // block.
3670  Assert(BB->getFirstNonPHI() == &CPI,
3671  "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3672 
3673  visitEHPadPredecessors(CPI);
3674  visitFuncletPadInst(CPI);
3675 }
3676 
3677 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3678  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3679  "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3680  CatchReturn.getOperand(0));
3681 
3682  visitTerminator(CatchReturn);
3683 }
3684 
3685 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3686  BasicBlock *BB = CPI.getParent();
3687 
3688  Function *F = BB->getParent();
3689  Assert(F->hasPersonalityFn(),
3690  "CleanupPadInst needs to be in a function with a personality.", &CPI);
3691 
3692  // The cleanuppad instruction must be the first non-PHI instruction in the
3693  // block.
3694  Assert(BB->getFirstNonPHI() == &CPI,
3695  "CleanupPadInst not the first non-PHI instruction in the block.",
3696  &CPI);
3697 
3698  auto *ParentPad = CPI.getParentPad();
3699  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3700  "CleanupPadInst has an invalid parent.", &CPI);
3701 
3702  visitEHPadPredecessors(CPI);
3703  visitFuncletPadInst(CPI);
3704 }
3705 
3706 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3707  User *FirstUser = nullptr;
3708  Value *FirstUnwindPad = nullptr;
3709  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3711 
3712  while (!Worklist.empty()) {
3713  FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3714  Assert(Seen.insert(CurrentPad).second,
3715  "FuncletPadInst must not be nested within itself", CurrentPad);
3716  Value *UnresolvedAncestorPad = nullptr;
3717  for (User *U : CurrentPad->users()) {
3718  BasicBlock *UnwindDest;
3719  if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3720  UnwindDest = CRI->getUnwindDest();
3721  } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3722  // We allow catchswitch unwind to caller to nest
3723  // within an outer pad that unwinds somewhere else,
3724  // because catchswitch doesn't have a nounwind variant.
3725  // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3726  if (CSI->unwindsToCaller())
3727  continue;
3728  UnwindDest = CSI->getUnwindDest();
3729  } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3730  UnwindDest = II->getUnwindDest();
3731  } else if (isa<CallInst>(U)) {
3732  // Calls which don't unwind may be found inside funclet
3733  // pads that unwind somewhere else. We don't *require*
3734  // such calls to be annotated nounwind.
3735  continue;
3736  } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3737  // The unwind dest for a cleanup can only be found by
3738  // recursive search. Add it to the worklist, and we'll
3739  // search for its first use that determines where it unwinds.
3740  Worklist.push_back(CPI);
3741  continue;
3742  } else {
3743  Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3744  continue;
3745  }
3746 
3747  Value *UnwindPad;
3748  bool ExitsFPI;
3749  if (UnwindDest) {
3750  UnwindPad = UnwindDest->getFirstNonPHI();
3751  if (!cast<Instruction>(UnwindPad)->isEHPad())
3752  continue;
3753  Value *UnwindParent = getParentPad(UnwindPad);
3754  // Ignore unwind edges that don't exit CurrentPad.
3755  if (UnwindParent == CurrentPad)
3756  continue;
3757  // Determine whether the original funclet pad is exited,
3758  // and if we are scanning nested pads determine how many
3759  // of them are exited so we can stop searching their
3760  // children.
3761  Value *ExitedPad = CurrentPad;
3762  ExitsFPI = false;
3763  do {
3764  if (ExitedPad == &FPI) {
3765  ExitsFPI = true;
3766  // Now we can resolve any ancestors of CurrentPad up to
3767  // FPI, but not including FPI since we need to make sure
3768  // to check all direct users of FPI for consistency.
3769  UnresolvedAncestorPad = &FPI;
3770  break;
3771  }
3772  Value *ExitedParent = getParentPad(ExitedPad);
3773  if (ExitedParent == UnwindParent) {
3774  // ExitedPad is the ancestor-most pad which this unwind
3775  // edge exits, so we can resolve up to it, meaning that
3776  // ExitedParent is the first ancestor still unresolved.
3777  UnresolvedAncestorPad = ExitedParent;
3778  break;
3779  }
3780  ExitedPad = ExitedParent;
3781  } while (!isa<ConstantTokenNone>(ExitedPad));
3782  } else {
3783  // Unwinding to caller exits all pads.
3784  UnwindPad = ConstantTokenNone::get(FPI.getContext());
3785  ExitsFPI = true;
3786  UnresolvedAncestorPad = &FPI;
3787  }
3788 
3789  if (ExitsFPI) {
3790  // This unwind edge exits FPI. Make sure it agrees with other
3791  // such edges.
3792  if (FirstUser) {
3793  Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3794  "pad must have the same unwind "
3795  "dest",
3796  &FPI, U, FirstUser);
3797  } else {
3798  FirstUser = U;
3799  FirstUnwindPad = UnwindPad;
3800  // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3801  if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3802  getParentPad(UnwindPad) == getParentPad(&FPI))
3803  SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
3804  }
3805  }
3806  // Make sure we visit all uses of FPI, but for nested pads stop as
3807  // soon as we know where they unwind to.
3808  if (CurrentPad != &FPI)
3809  break;
3810  }
3811  if (UnresolvedAncestorPad) {
3812  if (CurrentPad == UnresolvedAncestorPad) {
3813  // When CurrentPad is FPI itself, we don't mark it as resolved even if
3814  // we've found an unwind edge that exits it, because we need to verify
3815  // all direct uses of FPI.
3816  assert(CurrentPad == &FPI);
3817  continue;
3818  }
3819  // Pop off the worklist any nested pads that we've found an unwind
3820  // destination for. The pads on the worklist are the uncles,
3821  // great-uncles, etc. of CurrentPad. We've found an unwind destination
3822  // for all ancestors of CurrentPad up to but not including
3823  // UnresolvedAncestorPad.
3824  Value *ResolvedPad = CurrentPad;
3825  while (!Worklist.empty()) {
3826  Value *UnclePad = Worklist.back();
3827  Value *AncestorPad = getParentPad(UnclePad);
3828  // Walk ResolvedPad up the ancestor list until we either find the
3829  // uncle's parent or the last resolved ancestor.
3830  while (ResolvedPad != AncestorPad) {
3831  Value *ResolvedParent = getParentPad(ResolvedPad);
3832  if (ResolvedParent == UnresolvedAncestorPad) {
3833  break;
3834  }
3835  ResolvedPad = ResolvedParent;
3836  }
3837  // If the resolved ancestor search didn't find the uncle's parent,
3838  // then the uncle is not yet resolved.
3839  if (ResolvedPad != AncestorPad)
3840  break;
3841  // This uncle is resolved, so pop it from the worklist.
3842  Worklist.pop_back();
3843  }
3844  }
3845  }
3846 
3847  if (FirstUnwindPad) {
3848  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3849  BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3850  Value *SwitchUnwindPad;
3851  if (SwitchUnwindDest)
3852  SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3853  else
3854  SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3855  Assert(SwitchUnwindPad == FirstUnwindPad,
3856  "Unwind edges out of a catch must have the same unwind dest as "
3857  "the parent catchswitch",
3858  &FPI, FirstUser, CatchSwitch);
3859  }
3860  }
3861 
3862  visitInstruction(FPI);
3863 }
3864 
3865 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3866  BasicBlock *BB = CatchSwitch.getParent();
3867 
3868  Function *F = BB->getParent();
3869  Assert(F->hasPersonalityFn(),
3870  "CatchSwitchInst needs to be in a function with a personality.",
3871  &CatchSwitch);
3872 
3873  // The catchswitch instruction must be the first non-PHI instruction in the
3874  // block.
3875  Assert(BB->getFirstNonPHI() == &CatchSwitch,
3876  "CatchSwitchInst not the first non-PHI instruction in the block.",
3877  &CatchSwitch);
3878 
3879  auto *ParentPad = CatchSwitch.getParentPad();
3880  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3881  "CatchSwitchInst has an invalid parent.", ParentPad);
3882 
3883  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3884  Instruction *I = UnwindDest->getFirstNonPHI();
3885  Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3886  "CatchSwitchInst must unwind to an EH block which is not a "
3887  "landingpad.",
3888  &CatchSwitch);
3889 
3890  // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3891  if (getParentPad(I) == ParentPad)
3892  SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3893  }
3894 
3895  Assert(CatchSwitch.getNumHandlers() != 0,
3896  "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3897 
3898  for (BasicBlock *Handler : CatchSwitch.handlers()) {
3899  Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3900  "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3901  }
3902 
3903  visitEHPadPredecessors(CatchSwitch);
3904  visitTerminator(CatchSwitch);
3905 }
3906 
3907 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3908  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3909  "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3910  CRI.getOperand(0));
3911 
3912  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
3913  Instruction *I = UnwindDest->getFirstNonPHI();
3914  Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3915  "CleanupReturnInst must unwind to an EH block which is not a "
3916  "landingpad.",
3917  &CRI);
3918  }
3919 
3920  visitTerminator(CRI);
3921 }
3922 
3923 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
3924  Instruction *Op = cast<Instruction>(I.getOperand(i));
3925  // If the we have an invalid invoke, don't try to compute the dominance.
3926  // We already reject it in the invoke specific checks and the dominance
3927  // computation doesn't handle multiple edges.
3928  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
3929  if (II->getNormalDest() == II->getUnwindDest())
3930  return;
3931  }
3932 
3933  // Quick check whether the def has already been encountered in the same block.
3934  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
3935  // uses are defined to happen on the incoming edge, not at the instruction.
3936  //
3937  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
3938  // wrapping an SSA value, assert that we've already encountered it. See
3939  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
3940  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
3941  return;
3942 
3943  const Use &U = I.getOperandUse(i);
3944  Assert(DT.dominates(Op, U),
3945  "Instruction does not dominate all uses!", Op, &I);
3946 }
3947 
3948 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
3949  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
3950  "apply only to pointer types", &I);
3951  Assert(isa<LoadInst>(I),
3952  "dereferenceable, dereferenceable_or_null apply only to load"
3953  " instructions, use attributes for calls or invokes", &I);
3954  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
3955  "take one operand!", &I);
3956  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
3957  Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
3958  "dereferenceable_or_null metadata value must be an i64!", &I);
3959 }
3960 
3961 /// verifyInstruction - Verify that an instruction is well formed.
3962 ///
3963 void Verifier::visitInstruction(Instruction &I) {
3964  BasicBlock *BB = I.getParent();
3965  Assert(BB, "Instruction not embedded in basic block!", &I);
3966 
3967  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
3968  for (User *U : I.users()) {
3969  Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
3970  "Only PHI nodes may reference their own value!", &I);
3971  }
3972  }
3973 
3974  // Check that void typed values don't have names
3975  Assert(!I.getType()->isVoidTy() || !I.hasName(),
3976  "Instruction has a name, but provides a void value!", &I);
3977 
3978  // Check that the return value of the instruction is either void or a legal
3979  // value type.
3980  Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
3981  "Instruction returns a non-scalar type!", &I);
3982 
3983  // Check that the instruction doesn't produce metadata. Calls are already
3984  // checked against the callee type.
3985  Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
3986  "Invalid use of metadata!", &I);
3987 
3988  // Check that all uses of the instruction, if they are instructions
3989  // themselves, actually have parent basic blocks. If the use is not an
3990  // instruction, it is an error!
3991  for (Use &U : I.uses()) {
3992  if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
3993  Assert(Used->getParent() != nullptr,
3994  "Instruction referencing"
3995  " instruction not embedded in a basic block!",
3996  &I, Used);
3997  else {
3998  CheckFailed("Use of instruction is not an instruction!", U);
3999  return;
4000  }
4001  }
4002 
4003  // Get a pointer to the call base of the instruction if it is some form of
4004  // call.
4005  const CallBase *CBI = dyn_cast<CallBase>(&I);
4006 
4007  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4008  Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4009 
4010  // Check to make sure that only first-class-values are operands to
4011  // instructions.
4012  if (!I.getOperand(i)->getType()->isFirstClassType()) {
4013  Assert(false, "Instruction operands must be first-class values!", &I);
4014  }
4015 
4016  if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4017  // Check to make sure that the "address of" an intrinsic function is never
4018  // taken.
4019  Assert(!F->isIntrinsic() ||
4020  (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
4021  "Cannot take the address of an intrinsic!", &I);
4022  Assert(
4023  !F->isIntrinsic() || isa<CallInst>(I) ||
4024  F->getIntrinsicID() == Intrinsic::donothing ||
4025  F->getIntrinsicID() == Intrinsic::coro_resume ||
4026  F->getIntrinsicID() == Intrinsic::coro_destroy ||
4027  F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
4028  F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4029  F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4030  F->getIntrinsicID() == Intrinsic::wasm_rethrow_in_catch,
4031  "Cannot invoke an intrinsic other than donothing, patchpoint, "
4032  "statepoint, coro_resume or coro_destroy",
4033  &I);
4034  Assert(F->getParent() == &M, "Referencing function in another module!",
4035  &I, &M, F, F->getParent());
4036  } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4037  Assert(OpBB->getParent() == BB->getParent(),
4038  "Referring to a basic block in another function!", &I);
4039  } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4040  Assert(OpArg->getParent() == BB->getParent(),
4041  "Referring to an argument in another function!", &I);
4042  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4043  Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
4044  &M, GV, GV->getParent());
4045  } else if (isa<Instruction>(I.getOperand(i))) {
4046  verifyDominatesUse(I, i);
4047  } else if (isa<InlineAsm>(I.getOperand(i))) {
4048  Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4049  "Cannot take the address of an inline asm!", &I);
4050  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4051  if (CE->getType()->isPtrOrPtrVectorTy() ||
4053  // If we have a ConstantExpr pointer, we need to see if it came from an
4054  // illegal bitcast. If the datalayout string specifies non-integral
4055  // address spaces then we also need to check for illegal ptrtoint and
4056  // inttoptr expressions.
4057  visitConstantExprsRecursively(CE);
4058  }
4059  }
4060  }
4061 
4062  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
4064  "fpmath requires a floating point result!", &I);
4065  Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4066  if (ConstantFP *CFP0 =
4067  mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4068  const APFloat &Accuracy = CFP0->getValueAPF();
4069  Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4070  "fpmath accuracy must have float type", &I);
4071  Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4072  "fpmath accuracy not a positive number!", &I);
4073  } else {
4074  Assert(false, "invalid fpmath accuracy!", &I);
4075  }
4076  }
4077 
4078  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4079  Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4080  "Ranges are only for loads, calls and invokes!", &I);
4081  visitRangeMetadata(I, Range, I.getType());
4082  }
4083 
4085  Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4086  &I);
4087  Assert(isa<LoadInst>(I),
4088  "nonnull applies only to load instructions, use attributes"
4089  " for calls or invokes",
4090  &I);
4091  }
4092 
4094  visitDereferenceableMetadata(I, MD);
4095 
4097  visitDereferenceableMetadata(I, MD);
4098 
4099  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4100  TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4101 
4102  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
4103  Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4104  &I);
4105  Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4106  "use attributes for calls or invokes", &I);
4107  Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4108  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4109  Assert(CI && CI->getType()->isIntegerTy(64),
4110  "align metadata value must be an i64!", &I);
4111  uint64_t Align = CI->getZExtValue();
4112  Assert(isPowerOf2_64(Align),
4113  "align metadata value must be a power of 2!", &I);
4115  "alignment is larger that implementation defined limit", &I);
4116  }
4117 
4118  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4119  AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4120  visitMDNode(*N);
4121  }
4122 
4123  if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
4124  verifyFragmentExpression(*DII);
4125 
4126  InstsInThisBlock.insert(&I);
4127 }
4128 
4129 /// Allow intrinsics to be verified in different ways.
4130 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4131  Function *IF = Call.getCalledFunction();
4132  Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4133  IF);
4134 
4135  // Verify that the intrinsic prototype lines up with what the .td files
4136  // describe.
4137  FunctionType *IFTy = IF->getFunctionType();
4138  bool IsVarArg = IFTy->isVarArg();
4139 
4141  getIntrinsicInfoTableEntries(ID, Table);
4143 
4144  SmallVector<Type *, 4> ArgTys;
4145  Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(),
4146  TableRef, ArgTys),
4147  "Intrinsic has incorrect return type!", IF);
4148  for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
4149  Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i),
4150  TableRef, ArgTys),
4151  "Intrinsic has incorrect argument type!", IF);
4152 
4153  // Verify if the intrinsic call matches the vararg property.
4154  if (IsVarArg)
4155  Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4156  "Intrinsic was not defined with variable arguments!", IF);
4157  else
4158  Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4159  "Callsite was not defined with variable arguments!", IF);
4160 
4161  // All descriptors should be absorbed by now.
4162  Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4163 
4164  // Now that we have the intrinsic ID and the actual argument types (and we
4165  // know they are legal for the intrinsic!) get the intrinsic name through the
4166  // usual means. This allows us to verify the mangling of argument types into
4167  // the name.
4168  const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4169  Assert(ExpectedName == IF->getName(),
4170  "Intrinsic name not mangled correctly for type arguments! "
4171  "Should be: " +
4172  ExpectedName,
4173  IF);
4174 
4175  // If the intrinsic takes MDNode arguments, verify that they are either global
4176  // or are local to *this* function.
4177  for (Value *V : Call.args())
4178  if (auto *MD = dyn_cast<MetadataAsValue>(V))
4179  visitMetadataAsValue(*MD, Call.getCaller());
4180 
4181  switch (ID) {
4182  default:
4183  break;
4184  case Intrinsic::coro_id: {
4185  auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4186  if (isa<ConstantPointerNull>(InfoArg))
4187  break;
4188  auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4189  Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4190  "info argument of llvm.coro.begin must refer to an initialized "
4191  "constant");
4192  Constant *Init = GV->getInitializer();
4193  Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4194  "info argument of llvm.coro.begin must refer to either a struct or "
4195  "an array");
4196  break;
4197  }
4198  case Intrinsic::experimental_constrained_fadd:
4199  case Intrinsic::experimental_constrained_fsub:
4200  case Intrinsic::experimental_constrained_fmul:
4201  case Intrinsic::experimental_constrained_fdiv:
4202  case Intrinsic::experimental_constrained_frem:
4203  case Intrinsic::experimental_constrained_fma:
4204  case Intrinsic::experimental_constrained_sqrt:
4205  case Intrinsic::experimental_constrained_pow:
4206  case Intrinsic::experimental_constrained_powi:
4207  case Intrinsic::experimental_constrained_sin:
4208  case Intrinsic::experimental_constrained_cos:
4209  case Intrinsic::experimental_constrained_exp:
4210  case Intrinsic::experimental_constrained_exp2:
4211  case Intrinsic::experimental_constrained_log:
4212  case Intrinsic::experimental_constrained_log10:
4213  case Intrinsic::experimental_constrained_log2:
4214  case Intrinsic::experimental_constrained_rint:
4215  case Intrinsic::experimental_constrained_nearbyint:
4216  case Intrinsic::experimental_constrained_maxnum:
4217  case Intrinsic::experimental_constrained_minnum:
4218  case Intrinsic::experimental_constrained_ceil:
4219  case Intrinsic::experimental_constrained_floor:
4220  case Intrinsic::experimental_constrained_round:
4221  case Intrinsic::experimental_constrained_trunc:
4222  visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4223  break;
4224  case Intrinsic::dbg_declare: // llvm.dbg.declare
4225  Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
4226  "invalid llvm.dbg.declare intrinsic call 1", Call);
4227  visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4228  break;
4229  case Intrinsic::dbg_addr: // llvm.dbg.addr
4230  visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4231  break;
4232  case Intrinsic::dbg_value: // llvm.dbg.value
4233  visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4234  break;
4235  case Intrinsic::dbg_label: // llvm.dbg.label
4236  visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4237  break;
4238  case Intrinsic::memcpy:
4239  case Intrinsic::memmove:
4240  case Intrinsic::memset: {
4241  const auto *MI = cast<MemIntrinsic>(&Call);
4242  auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4243  return Alignment == 0 || isPowerOf2_32(Alignment);
4244  };
4245  Assert(IsValidAlignment(MI->getDestAlignment()),
4246  "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4247  Call);
4248  if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4249  Assert(IsValidAlignment(MTI->getSourceAlignment()),
4250  "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4251  Call);
4252  }
4253 
4254  break;
4255  }
4256  case Intrinsic::memcpy_element_unordered_atomic:
4257  case Intrinsic::memmove_element_unordered_atomic:
4258  case Intrinsic::memset_element_unordered_atomic: {
4259  const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4260 
4261  ConstantInt *ElementSizeCI =
4262  cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4263  const APInt &ElementSizeVal = ElementSizeCI->getValue();
4264  Assert(ElementSizeVal.isPowerOf2(),
4265  "element size of the element-wise atomic memory intrinsic "
4266  "must be a power of 2",
4267  Call);
4268 
4269  if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
4270  uint64_t Length = LengthCI->getZExtValue();
4271  uint64_t ElementSize = AMI->getElementSizeInBytes();
4272  Assert((Length % ElementSize) == 0,
4273  "constant length must be a multiple of the element size in the "
4274  "element-wise atomic memory intrinsic",
4275  Call);
4276  }
4277 
4278  auto IsValidAlignment = [&](uint64_t Alignment) {
4279  return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4280  };
4281  uint64_t DstAlignment = AMI->getDestAlignment();
4282  Assert(IsValidAlignment(DstAlignment),
4283  "incorrect alignment of the destination argument", Call);
4284  if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4285  uint64_t SrcAlignment = AMT->getSourceAlignment();
4286  Assert(IsValidAlignment(SrcAlignment),
4287  "incorrect alignment of the source argument", Call);
4288  }
4289  break;
4290  }
4291  case Intrinsic::gcroot:
4292  case Intrinsic::gcwrite:
4293  case Intrinsic::gcread:
4294  if (ID == Intrinsic::gcroot) {
4295  AllocaInst *AI =
4297  Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
4298  Assert(isa<Constant>(Call.getArgOperand(1)),
4299  "llvm.gcroot parameter #2 must be a constant.", Call);
4300  if (!AI->getAllocatedType()->isPointerTy()) {
4301  Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
4302  "llvm.gcroot parameter #1 must either be a pointer alloca, "
4303  "or argument #2 must be a non-null constant.",
4304  Call);
4305  }
4306  }
4307 
4308  Assert(Call.getParent()->getParent()->hasGC(),
4309  "Enclosing function does not use GC.", Call);
4310  break;
4311  case Intrinsic::init_trampoline:
4312  Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
4313  "llvm.init_trampoline parameter #2 must resolve to a function.",
4314  Call);
4315  break;
4316  case Intrinsic::prefetch:
4317  Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
4318  cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
4319  "invalid arguments to llvm.prefetch", Call);
4320  break;
4321  case Intrinsic::stackprotector:
4322  Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
4323  "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
4324  break;
4325  case Intrinsic::localescape: {
4326  BasicBlock *BB = Call.getParent();
4327  Assert(BB == &BB->getParent()->front(),
4328  "llvm.localescape used outside of entry block", Call);
4329  Assert(!SawFrameEscape,
4330  "multiple calls to llvm.localescape in one function", Call);
4331  for (Value *Arg : Call.args()) {
4332  if (isa<ConstantPointerNull>(Arg))
4333  continue; // Null values are allowed as placeholders.
4334  auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4335  Assert(AI && AI->isStaticAlloca(),
4336  "llvm.localescape only accepts static allocas", Call);
4337  }
4338  FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
4339  SawFrameEscape = true;
4340  break;
4341  }
4342  case Intrinsic::localrecover: {
4343  Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
4344  Function *Fn = dyn_cast<Function>(FnArg);
4345  Assert(Fn && !Fn->isDeclaration(),
4346  "llvm.localrecover first "
4347  "argument must be function defined in this module",
4348  Call);
4349  auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
4350  auto &Entry = FrameEscapeInfo[Fn];
4351  Entry.second = unsigned(
4352  std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4353  break;
4354  }
4355 
4356  case Intrinsic::experimental_gc_statepoint:
4357  if (auto *CI = dyn_cast<CallInst>(&Call))
4358  Assert(!CI->isInlineAsm(),
4359  "gc.statepoint support for inline assembly unimplemented", CI);
4360  Assert(Call.getParent()->getParent()->hasGC(),
4361  "Enclosing function does not use GC.", Call);
4362 
4363  verifyStatepoint(Call);
4364  break;
4365  case Intrinsic::experimental_gc_result: {
4366  Assert(Call.getParent()->getParent()->hasGC(),
4367  "Enclosing function does not use GC.", Call);
4368  // Are we tied to a statepoint properly?
4369  const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
4370  const Function *StatepointFn =
4371  StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
4372  Assert(StatepointFn && StatepointFn->isDeclaration() &&
4373  StatepointFn->getIntrinsicID() ==
4374  Intrinsic::experimental_gc_statepoint,
4375  "gc.result operand #1 must be from a statepoint", Call,
4376  Call.getArgOperand(0));
4377 
4378  // Assert that result type matches wrapped callee.
4379  const Value *Target = StatepointCall->getArgOperand(2);
4380  auto *PT = cast<PointerType>(Target->getType());
4381  auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4382  Assert(Call.getType() == TargetFuncType->getReturnType(),
4383  "gc.result result type does not match wrapped callee", Call);
4384  break;
4385  }
4386  case Intrinsic::experimental_gc_relocate: {
4387  Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
4388 
4389  Assert(isa<PointerType>(Call.getType()->getScalarType()),
4390  "gc.relocate must return a pointer or a vector of pointers", Call);
4391 
4392  // Check that this relocate is correctly tied to the statepoint
4393 
4394  // This is case for relocate on the unwinding path of an invoke statepoint
4395  if (LandingPadInst *LandingPad =
4396  dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
4397 
4398  const BasicBlock *InvokeBB =
4399  LandingPad->getParent()->getUniquePredecessor();
4400 
4401  // Landingpad relocates should have only one predecessor with invoke
4402  // statepoint terminator
4403  Assert(InvokeBB, "safepoints should have unique landingpads",
4404  LandingPad->getParent());
4405  Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4406  InvokeBB);
4407  Assert(isStatepoint(InvokeBB->getTerminator()),
4408  "gc relocate should be linked to a statepoint", InvokeBB);
4409  } else {
4410  // In all other cases relocate should be tied to the statepoint directly.
4411  // This covers relocates on a normal return path of invoke statepoint and
4412  // relocates of a call statepoint.
4413  auto Token = Call.getArgOperand(0);
4414  Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4415  "gc relocate is incorrectly tied to the statepoint", Call, Token);
4416  }
4417 
4418  // Verify rest of the relocate arguments.
4419  const CallBase &StatepointCall =
4420  *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
4421 
4422  // Both the base and derived must be piped through the safepoint.
4423  Value *Base = Call.getArgOperand(1);
4424  Assert(isa<ConstantInt>(Base),
4425  "gc.relocate operand #2 must be integer offset", Call);
4426 
4427  Value *Derived = Call.getArgOperand(2);
4428  Assert(isa<ConstantInt>(Derived),
4429  "gc.relocate operand #3 must be integer offset", Call);
4430 
4431  const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4432  const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4433  // Check the bounds
4434  Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
4435  "gc.relocate: statepoint base index out of bounds", Call);
4436  Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
4437  "gc.relocate: statepoint derived index out of bounds", Call);
4438 
4439  // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4440  // section of the statepoint's argument.
4441  Assert(StatepointCall.arg_size() > 0,
4442  "gc.statepoint: insufficient arguments");
4443  Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
4444  "gc.statement: number of call arguments must be constant integer");
4445  const unsigned NumCallArgs =
4446  cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
4447  Assert(StatepointCall.arg_size() > NumCallArgs + 5,
4448  "gc.statepoint: mismatch in number of call arguments");
4449  Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
4450  "gc.statepoint: number of transition arguments must be "
4451  "a constant integer");
4452  const int NumTransitionArgs =
4453  cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
4454  ->getZExtValue();
4455  const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4456  Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
4457  "gc.statepoint: number of deoptimization arguments must be "
4458  "a constant integer");
4459  const int NumDeoptArgs =
4460  cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
4461  ->getZExtValue();
4462  const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4463  const int GCParamArgsEnd = StatepointCall.arg_size();
4464  Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4465  "gc.relocate: statepoint base index doesn't fall within the "
4466  "'gc parameters' section of the statepoint call",
4467  Call);
4468  Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4469  "gc.relocate: statepoint derived index doesn't fall within the "
4470  "'gc parameters' section of the statepoint call",
4471  Call);
4472 
4473  // Relocated value must be either a pointer type or vector-of-pointer type,
4474  // but gc_relocate does not need to return the same pointer type as the
4475  // relocated pointer. It can be casted to the correct type later if it's
4476  // desired. However, they must have the same address space and 'vectorness'
4477  GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
4479  "gc.relocate: relocated value must be a gc pointer", Call);
4480 
4481  auto ResultType = Call.getType();
4482  auto DerivedType = Relocate.getDerivedPtr()->getType();
4483  Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4484  "gc.relocate: vector relocates to vector and pointer to pointer",
4485  Call);
4486  Assert(
4487  ResultType->getPointerAddressSpace() ==
4488  DerivedType->getPointerAddressSpace(),
4489  "gc.relocate: relocating a pointer shouldn't change its address space",
4490  Call);
4491  break;
4492  }
4493  case Intrinsic::eh_exceptioncode:
4494  case Intrinsic::eh_exceptionpointer: {
4495  Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
4496  "eh.exceptionpointer argument must be a catchpad", Call);
4497  break;
4498  }
4499  case Intrinsic::masked_load: {
4500  Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
4501  Call);
4502 
4503  Value *Ptr = Call.getArgOperand(0);
4504  ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
4505  Value *Mask = Call.getArgOperand(2);
4506  Value *PassThru = Call.getArgOperand(3);
4507  Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
4508  Call);
4509  Assert(Alignment->getValue().isPowerOf2(),
4510  "masked_load: alignment must be a power of 2", Call);
4511 
4512  // DataTy is the overloaded type
4513  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4514  Assert(DataTy == Call.getType(),
4515  "masked_load: return must match pointer type", Call);
4516  Assert(PassThru->getType() == DataTy,
4517  "masked_load: pass through and data type must match", Call);
4518  Assert(Mask->getType()->getVectorNumElements() ==
4519  DataTy->getVectorNumElements(),
4520  "masked_load: vector mask must be same length as data", Call);
4521  break;
4522  }
4523  case Intrinsic::masked_store: {
4524  Value *Val = Call.getArgOperand(0);
4525  Value *Ptr = Call.getArgOperand(1);
4526  ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
4527  Value *Mask = Call.getArgOperand(3);
4528  Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
4529  Call);
4530  Assert(Alignment->getValue().isPowerOf2(),
4531  "masked_store: alignment must be a power of 2", Call);
4532 
4533  // DataTy is the overloaded type
4534  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4535  Assert(DataTy == Val->getType(),
4536  "masked_store: storee must match pointer type", Call);
4537  Assert(Mask->getType()->getVectorNumElements() ==
4538  DataTy->getVectorNumElements(),
4539  "masked_store: vector mask must be same length as data", Call);
4540  break;
4541  }
4542 
4543  case Intrinsic::experimental_guard: {
4544  Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
4546  "experimental_guard must have exactly one "
4547  "\"deopt\" operand bundle");
4548  break;
4549  }
4550 
4551  case Intrinsic::experimental_deoptimize: {
4552  Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
4553  Call);
4555  "experimental_deoptimize must have exactly one "
4556  "\"deopt\" operand bundle");
4557  Assert(Call.getType() == Call.getFunction()->getReturnType(),
4558  "experimental_deoptimize return type must match caller return type");
4559 
4560  if (isa<CallInst>(Call)) {
4561  auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
4562  Assert(RI,
4563  "calls to experimental_deoptimize must be followed by a return");
4564 
4565  if (!Call.getType()->isVoidTy() && RI)
4566  Assert(RI->getReturnValue() == &Call,
4567  "calls to experimental_deoptimize must be followed by a return "
4568  "of the value computed by experimental_deoptimize");
4569  }
4570 
4571  break;
4572  }
4573  case Intrinsic::sadd_sat:
4574  case Intrinsic::uadd_sat:
4575  case Intrinsic::ssub_sat:
4576  case Intrinsic::usub_sat: {
4577  Value *Op1 = Call.getArgOperand(0);
4578  Value *Op2 = Call.getArgOperand(1);
4579  Assert(Op1->getType()->isIntOrIntVectorTy(),
4580  "first operand of [us][add|sub]_sat must be an int type or vector "
4581  "of ints");
4582  Assert(Op2->getType()->isIntOrIntVectorTy(),
4583  "second operand of [us][add|sub]_sat must be an int type or vector "
4584  "of ints");
4585  break;
4586  }
4587  case Intrinsic::smul_fix:
4588  case Intrinsic::umul_fix: {
4589  Value *Op1 = Call.getArgOperand(0);
4590  Value *Op2 = Call.getArgOperand(1);
4591  Assert(Op1->getType()->isIntOrIntVectorTy(),
4592  "first operand of [us]mul_fix must be an int type or vector "
4593  "of ints");
4594  Assert(Op2->getType()->isIntOrIntVectorTy(),
4595  "second operand of [us]mul_fix must be an int type or vector "
4596  "of ints");
4597 
4598  auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
4599  Assert(Op3->getType()->getBitWidth() <= 32,
4600  "third argument of [us]mul_fix must fit within 32 bits");
4601 
4602  if (ID == Intrinsic::smul_fix) {
4603  Assert(
4604  Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4605  "the scale of smul_fix must be less than the width of the operands");
4606  } else {
4607  Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
4608  "the scale of umul_fix must be less than or equal to the width of "
4609  "the operands");
4610  }
4611  break;
4612  }
4613  };
4614 }
4615 
4616 /// Carefully grab the subprogram from a local scope.
4617 ///
4618 /// This carefully grabs the subprogram from a local scope, avoiding the
4619 /// built-in assertions that would typically fire.
4620 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4621  if (!LocalScope)
4622  return nullptr;
4623 
4624  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4625  return SP;
4626 
4627  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4628  return getSubprogram(LB->getRawScope());
4629 
4630  // Just return null; broken scope chains are checked elsewhere.
4631  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4632  return nullptr;
4633 }
4634 
// Verify a constrained floating-point intrinsic: operand count must match
// its arity, and the trailing metadata operands (rounding mode, exception
// behavior) must be present and well-formed.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  unsigned NumOperands = FPI.getNumArgOperands();
  // Unary ops take 3 operands, ternary ops (e.g. fma) take 5; everything
  // else is binary and takes 4 (two FP operands + two metadata operands).
  Assert(((NumOperands == 5 && FPI.isTernaryOp()) ||
          (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
         "invalid arguments for constrained FP intrinsic", &FPI);
  // The last operand is the exception behavior; the second-to-last is the
  // rounding mode. Both must be metadata.
  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
         "invalid exception behavior argument", &FPI);
  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
         "invalid rounding mode argument", &FPI);
  // NOTE(review): the two Assert statements below are truncated in this
  // view -- their opening condition lines were dropped during extraction.
  // Presumably they validate the decoded rounding-mode and exception-behavior
  // metadata values; confirm against the original file.
         "invalid rounding mode argument", &FPI);
         "invalid exception behavior argument", &FPI);
}
4649 
// Verify a llvm.dbg.<Kind> variable intrinsic (declare/addr/value): operand
// forms, the presence of a !dbg attachment, and agreement between the
// variable's scope and the attachment's scope.
void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
  // Operand 0 (the described address/value) must be either wrapped IR
  // (ValueAsMetadata) or an MDNode with no operands.
  auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
  AssertDI(isa<ValueAsMetadata>(MD) ||
               (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
           "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
  AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
           "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
           DII.getRawVariable());
  AssertDI(isa<DIExpression>(DII.getRawExpression()),
           "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
           DII.getRawExpression());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  BasicBlock *BB = DII.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILocalVariable *Var = DII.getVariable();
  DILocation *Loc = DII.getDebugLoc();
  AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
           &DII, BB, F);

  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
                               " variable and !dbg attachment",
           &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
           Loc->getScope()->getSubprogram());

  // This check is redundant with one in visitLocalVariable().
  AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
           Var->getRawType());
  if (auto *Type = dyn_cast_or_null<DIType>(Var->getRawType()))
    if (Type->isBlockByrefStruct())
      // NOTE(review): the line opening this AssertDI (and its condition) is
      // missing from this view -- dropped during extraction; only the
      // message operands remain below. Confirm against the original file.
      "BlockByRef variable without complex expression", Var, &DII);

  verifyFnArgs(DII);
}
4696 
4697 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
4698  AssertDI(isa<DILabel>(DLI.getRawLabel()),
4699  "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
4700  DLI.getRawLabel());
4701 
4702  // Ignore broken !dbg attachments; they're checked elsewhere.
4703  if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
4704  if (!isa<DILocation>(N))
4705  return;
4706 
4707  BasicBlock *BB = DLI.getParent();
4708  Function *F = BB ? BB->getParent() : nullptr;
4709 
4710  // The scopes for variables and !dbg attachments must agree.
4711  DILabel *Label = DLI.getLabel();
4712  DILocation *Loc = DLI.getDebugLoc();
4713  Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4714  &DLI, BB, F);
4715 
4716  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
4717  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4718  if (!LabelSP || !LocSP)
4719  return;
4720 
4721  AssertDI(LabelSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4722  " label and !dbg attachment",
4723  &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
4724  Loc->getScope()->getSubprogram());
4725 }
4726 
4727 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
4728  DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4729  DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4730 
4731  // We don't know whether this intrinsic verified correctly.
4732  if (!V || !E || !E->isValid())
4733  return;
4734 
4735  // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4736  auto Fragment = E->getFragmentInfo();
4737  if (!Fragment)
4738  return;
4739 
4740  // The frontend helps out GDB by emitting the members of local anonymous
4741  // unions as artificial local variables with shared storage. When SROA splits
4742  // the storage for artificial local variables that are smaller than the entire
4743  // union, the overhang piece will be outside of the allotted space for the
4744  // variable and this check fails.
4745  // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4746  if (V->isArtificial())
4747  return;
4748 
4749  verifyFragmentExpression(*V, *Fragment, &I);
4750 }
4751 
4752 template <typename ValueOrMetadata>
4753 void Verifier::verifyFragmentExpression(const DIVariable &V,
4754  DIExpression::FragmentInfo Fragment,
4755  ValueOrMetadata *Desc) {
4756  // If there's no size, the type is broken, but that should be checked
4757  // elsewhere.
4758  auto VarSize = V.getSizeInBits();
4759  if (!VarSize)
4760  return;
4761 
4762  unsigned FragSize = Fragment.SizeInBits;
4763  unsigned FragOffset = Fragment.OffsetInBits;
4764  AssertDI(FragSize + FragOffset <= *VarSize,
4765  "fragment is larger than or outside of variable", Desc, &V);
4766  AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
4767 }
4768 
4769 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
4770  // This function does not take the scope of noninlined function arguments into
4771  // account. Don't run it if current function is nodebug, because it may
4772  // contain inlined debug intrinsics.
4773  if (!HasDebugInfo)
4774  return;
4775 
4776  // For performance reasons only check non-inlined ones.
4777  if (I.getDebugLoc()->getInlinedAt())
4778  return;
4779 
4780  DILocalVariable *Var = I.getVariable();
4781  AssertDI(Var, "dbg intrinsic without variable");
4782 
4783  unsigned ArgNo = Var->getArg();
4784  if (!ArgNo)
4785  return;
4786 
4787  // Verify there are no duplicate function argument debug info entries.
4788  // These will cause hard-to-debug assertions in the DWARF backend.
4789  if (DebugFnArgs.size() < ArgNo)
4790  DebugFnArgs.resize(ArgNo, nullptr);
4791 
4792  auto *Prev = DebugFnArgs[ArgNo - 1];
4793  DebugFnArgs[ArgNo - 1] = Var;
4794  AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
4795  Prev, Var);
4796 }
4797 
// Check that every DICompileUnit reached while visiting debug info is listed
// in the module's llvm.dbg.cu named metadata.
void Verifier::verifyCompileUnits() {
  // When more than one Module is imported into the same context, such as during
  // an LTO build before linking the modules, ODR type uniquing may cause types
  // to point to a different CU. This check does not make sense in this case.
  // NOTE(review): the condition guarding this early return was dropped from
  // this view during extraction; confirm against the original file.
    return;
  auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
  // NOTE(review): the declaration of 'Listed' (the set of CUs named in
  // llvm.dbg.cu) was dropped from this view during extraction.
  if (CUs)
    Listed.insert(CUs->op_begin(), CUs->op_end());
  // Every visited CU must appear in the module-level list.
  for (auto *CU : CUVisited)
    AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
  CUVisited.clear();
}
4812 
4813 void Verifier::verifyDeoptimizeCallingConvs() {
4814  if (DeoptimizeDeclarations.empty())
4815  return;
4816 
4817  const Function *First = DeoptimizeDeclarations[0];
4818  for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
4819  Assert(First->getCallingConv() == F->getCallingConv(),
4820  "All llvm.experimental.deoptimize declarations must have the same "
4821  "calling convention",
4822  First, F);
4823  }
4824 }
4825 
4826 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
4827  bool HasSource = F.getSource().hasValue();
4828  if (!HasSourceDebugInfo.count(&U))
4829  HasSourceDebugInfo[&U] = HasSource;
4830  AssertDI(HasSource == HasSourceDebugInfo[&U],
4831  "inconsistent use of embedded source");
4832 }
4833 
4834 //===----------------------------------------------------------------------===//
4835 // Implement the public interfaces to this file...
4836 //===----------------------------------------------------------------------===//
4837 
  // NOTE(review): this is the body of llvm::verifyFunction; its signature
  // line is not visible in this view (dropped during extraction).
  Function &F = const_cast<Function &>(f);

  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());

  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return !V.verify(F);
}
4848 
// NOTE(review): this is the tail of llvm::verifyModule; the first line of
// its signature is not visible in this view (dropped during extraction).
                       bool *BrokenDebugInfo) {
  // Don't use a raw_null_ostream. Printing IR is expensive.
  // Broken debug info is fatal only when the caller gave us no out-parameter
  // to report it through.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);

  // Verify every function, then the module-level properties.
  bool Broken = false;
  for (const Function &F : M)
    Broken |= !V.verify(F);

  Broken |= !V.verify();
  if (BrokenDebugInfo)
    *BrokenDebugInfo = V.hasBrokenDebugInfo();
  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return Broken;
}
4865 
4866 namespace {
4867 
/// Legacy pass manager wrapper around the IR Verifier: verifies each
/// function as it runs, and declarations plus module-level properties at
/// finalization.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  // Lazily constructed in doInitialization; reports to dbgs() and does not
  // treat broken debug info as a verifier error (see doInitialization).
  std::unique_ptr<Verifier> V;
  // When true, a broken function or module aborts compilation via
  // report_fatal_error.
  bool FatalErrors = true;

  VerifierLegacyPass() : FunctionPass(ID) {
    // NOTE(review): the pass-initialization call in this constructor body
    // was dropped from this view during extraction.
  }
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID),
        FatalErrors(FatalErrors) {
    // NOTE(review): see above -- initialization call not visible here.
  }

  bool doInitialization(Module &M) override {
    V = llvm::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false;
  }

  bool doFinalization(Module &M) override {
    // Declarations are not visited by runOnFunction, so verify them here,
    // together with the module-level checks.
    bool HasErrors = false;
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};
4913 
4914 } // end anonymous namespace
4915 
4916 /// Helper to issue failure from the TBAA verification
4917 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
4918  if (Diagnostic)
4919  return Diagnostic->CheckFailed(Args...);
4920 }
4921 
// Report a TBAA verification failure via CheckFailed and bail out of the
// enclosing bool-returning check function with 'false'. The do/while
// wrapper makes the macro usable as a single statement.
#define AssertTBAA(C, ...)                                                     \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
4929 
4930 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
4931 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a
4932 /// struct-type node describing an aggregate data structure (like a struct).
4933 TBAAVerifier::TBAABaseNodeSummary
4934 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
4935  bool IsNewFormat) {
4936  if (BaseNode->getNumOperands() < 2) {
4937  CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
4938  return {true, ~0u};
4939  }
4940 
4941  auto Itr = TBAABaseNodes.find(BaseNode);
4942  if (Itr != TBAABaseNodes.end())
4943  return Itr->second;
4944 
4945  auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
4946  auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
4947  (void)InsertResult;
4948  assert(InsertResult.second && "We just checked!");
4949  return Result;
4950 }
4951 
// Uncached implementation of verifyTBAABaseNode: structurally validates a
// scalar or struct-type TBAA base node (old or new format) and returns a
// summary of {IsInvalid, offset bit width}.
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
                                     bool IsNewFormat) {
  // Summary used for every failure path: invalid, with an unknown bit width.
  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};

  if (BaseNode->getNumOperands() == 2) {
    // Scalar nodes can only be accessed at offset 0.
    return isValidScalarTBAANode(BaseNode)
               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
               : InvalidNode;
  }

  // Validate the operand count against the per-field layout of each format:
  // new format is (name, size, id, [type, offset, size]...), old format is
  // (name, [type, offset]...).
  if (IsNewFormat) {
    if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Access tag nodes must have the number of operands that is a "
                  "multiple of 3!", BaseNode);
      return InvalidNode;
    }
  } else {
    if (BaseNode->getNumOperands() % 2 != 1) {
      CheckFailed("Struct tag nodes must have an odd number of operands!",
                  BaseNode);
      return InvalidNode;
    }
  }

  // Check the type size field.
  if (IsNewFormat) {
    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        BaseNode->getOperand(1));
    if (!TypeSizeNode) {
      CheckFailed("Type size nodes must be constants!", &I, BaseNode);
      return InvalidNode;
    }
  }

  // Check the type name field. In the new format it can be anything.
  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes have a string as their first operand",
                BaseNode);
    return InvalidNode;
  }

  // Field-level failures are accumulated rather than returned immediately,
  // so every malformed field gets diagnosed in one pass.
  bool Failed = false;

  Optional<APInt> PrevOffset;
  unsigned BitWidth = ~0u;

  // We've already checked that BaseNode is not a degenerate root node with one
  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
           Idx += NumOpsPerField) {
    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
    if (!isa<MDNode>(FieldTy)) {
      CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
      Failed = true;
      continue;
    }

    auto *OffsetEntryCI =
        mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
    if (!OffsetEntryCI) {
      CheckFailed("Offset entries must be constants!", &I, BaseNode);
      Failed = true;
      continue;
    }

    // The first well-formed offset fixes the bit width all later offsets
    // must use.
    if (BitWidth == ~0u)
      BitWidth = OffsetEntryCI->getBitWidth();

    if (OffsetEntryCI->getBitWidth() != BitWidth) {
      CheckFailed(
          "Bitwidth between the offsets and struct type entries must match", &I,
          BaseNode);
      Failed = true;
      continue;
    }

    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero size bit fields. When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field lexically the latest in struct type metadata node. This
    // mirrors the actual behavior of the alias analysis implementation.
    bool IsAscending =
        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());

    if (!IsAscending) {
      CheckFailed("Offsets must be increasing!", &I, BaseNode);
      Failed = true;
    }

    PrevOffset = OffsetEntryCI->getValue();

    if (IsNewFormat) {
      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
          BaseNode->getOperand(Idx + 2));
      if (!MemberSizeNode) {
        CheckFailed("Member size entries must be constants!", &I, BaseNode);
        Failed = true;
        continue;
      }
    }
  }

  return Failed ? InvalidNode
                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
}
5062 
5063 static bool IsRootTBAANode(const MDNode *MD) {
5064  return MD->getNumOperands() < 2;
5065 }
5066 
5067 static bool IsScalarTBAANodeImpl(const MDNode *MD,
5069  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
5070  return false;
5071 
5072  if (!isa<MDString>(MD->getOperand(0)))
5073  return false;
5074 
5075  if (MD->getNumOperands() == 3) {
5076  auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
5077  if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
5078  return false;
5079  }
5080 
5081  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5082  return Parent && Visited.insert(Parent).second &&
5083  (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
5084 }
5085 
5086 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
5087  auto ResultIt = TBAAScalarNodes.find(MD);
5088  if (ResultIt != TBAAScalarNodes.end())
5089  return ResultIt->second;
5090 
5092  bool Result = IsScalarTBAANodeImpl(MD, Visited);
5093  auto InsertResult = TBAAScalarNodes.insert({MD, Result});
5094  (void)InsertResult;
5095  assert(InsertResult.second && "Just checked!");
5096 
5097  return Result;
5098 }
5099 
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to Assert that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Struct-like nodes: fields are laid out as (type, offset[, size]) groups
  // with ascending offsets (checked in verifyTBAABaseNodeImpl). Find the
  // last field whose offset is <= Offset.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
           Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      // The very first field already starts past Offset: there is no
      // containing field at all.
      if (Idx == FirstFieldOpNo) {
        CheckFailed("Could not find TBAA parent in struct type node", &I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // The previous field is the one containing Offset; rebase Offset to be
      // relative to that field's start.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Every field starts at or before Offset, so the last field contains it.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
5143 
5145  if (!Type || Type->getNumOperands() < 3)
5146  return false;
5147 
5148  // In the new format type nodes shall have a reference to the parent type as
5149  // its first operand.
5150  MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
5151  if (!Parent)
5152  return false;
5153 
5154  return true;
5155 }
5156 
5158  AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5159  isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
5160  isa<AtomicCmpXchgInst>(I),
5161  "This instruction shall not have a TBAA access tag!", &I);
5162 
5163  bool IsStructPathTBAA =
5164  isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
5165 
5166  AssertTBAA(
5167  IsStructPathTBAA,
5168  "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
5169 
5170  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
5171  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5172 
5173  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
5174 
5175  if (IsNewFormat) {
5176  AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
5177  "Access tag metadata must have either 4 or 5 operands", &I, MD);
5178  } else {
5179  AssertTBAA(MD->getNumOperands() < 5,
5180  "Struct tag metadata must have either 3 or 4 operands", &I, MD);
5181  }
5182 
5183  // Check the access size field.
5184  if (IsNewFormat) {
5185  auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5186  MD->getOperand(3));
5187  AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
5188  }
5189 
5190  // Check the immutability flag.
5191  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
5192  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
5193  auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
5194  MD->getOperand(ImmutabilityFlagOpNo));
5195  AssertTBAA(IsImmutableCI,
5196  "Immutability tag on struct tag metadata must be a constant",
5197  &I, MD);
5198  AssertTBAA(
5199  IsImmutableCI->isZero() || IsImmutableCI->isOne(),
5200  "Immutability part of the struct tag metadata must be either 0 or 1",
5201  &I, MD);
5202  }
5203 
5204  AssertTBAA(BaseNode && AccessType,
5205  "Malformed struct tag metadata: base and access-type "
5206  "should be non-null and point to Metadata nodes",
5207  &I, MD, BaseNode, AccessType);
5208 
5209  if (!IsNewFormat) {