LLVM  9.0.0svn
Verifier.cpp
1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the function verifier interface, which can be used for
10 // some sanity checking of input to the system.
11 //
12 // Note that this does not provide full `Java style' security and verification;
13 // instead, it just tries to ensure that code is well-formed.
14 //
15 // * Both of a binary operator's parameters are of the same type
16 // * Verify that the indices of mem access instructions match other operands
17 // * Verify that arithmetic and other things are only performed on first-class
18 // types. Verify, for example, that shifts & logical ops only happen on integrals
19 // * All of the constants in a switch statement are of the correct type
20 // * The code is in valid SSA form
21 // * It should be illegal to put a label into any other type (like a structure)
22 // or to return one. [except constant arrays!]
23 // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24 // * PHI nodes must have an entry for each predecessor, with no extras.
25 // * PHI nodes must be the first thing in a basic block, all grouped together
26 // * PHI nodes must have at least one entry
27 // * All basic blocks should only end with terminator insts, not contain them
28 // * The entry node to a function must not have predecessors
29 // * All Instructions must be embedded into a basic block
30 // * Functions cannot take a void-typed parameter
31 // * Verify that a function's argument list agrees with its declared type.
32 // * It is illegal to specify a name for a void value.
33 // * It is illegal to have an internal global value with no initializer
34 // * It is illegal to have a ret instruction that returns a value that does not
35 // agree with the function return value type.
36 // * Function call argument types match the function prototype
37 // * A landing pad is defined by a landingpad instruction, and can be jumped to
38 // only by the unwind edge of an invoke instruction.
39 // * A landingpad instruction must be the first non-PHI instruction in the
40 // block.
41 // * Landingpad instructions must be in a function with a personality function.
42 // * All other things that are tested by asserts spread about the code...
43 //
44 //===----------------------------------------------------------------------===//
45 
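// The checks listed above are exposed to clients through the entry points
// declared in llvm/IR/Verifier.h. A minimal sketch of a standalone driver
// (the input file name is only an illustration):
//
//   #include "llvm/IR/LLVMContext.h"
//   #include "llvm/IR/Module.h"
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/IRReader/IRReader.h"
//   #include "llvm/Support/SourceMgr.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   int main() {
//     llvm::LLVMContext Ctx;
//     llvm::SMDiagnostic Err;
//     std::unique_ptr<llvm::Module> M = llvm::parseIRFile("input.ll", Err, Ctx);
//     if (!M)
//       return 1;
//     // verifyModule() returns true if the module is broken; diagnostics are
//     // streamed to the raw_ostream passed as the second argument.
//     return llvm::verifyModule(*M, &llvm::errs()) ? 2 : 0;
//   }
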
46 #include "llvm/IR/Verifier.h"
47 #include "llvm/ADT/APFloat.h"
48 #include "llvm/ADT/APInt.h"
49 #include "llvm/ADT/ArrayRef.h"
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/Optional.h"
53 #include "llvm/ADT/STLExtras.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/ADT/ilist.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/CallingConv.h"
68 #include "llvm/IR/Comdat.h"
69 #include "llvm/IR/Constant.h"
70 #include "llvm/IR/ConstantRange.h"
71 #include "llvm/IR/Constants.h"
72 #include "llvm/IR/DataLayout.h"
73 #include "llvm/IR/DebugInfo.h"
75 #include "llvm/IR/DebugLoc.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/Function.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/GlobalVariable.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/InstVisitor.h"
84 #include "llvm/IR/InstrTypes.h"
85 #include "llvm/IR/Instruction.h"
86 #include "llvm/IR/Instructions.h"
87 #include "llvm/IR/IntrinsicInst.h"
88 #include "llvm/IR/Intrinsics.h"
89 #include "llvm/IR/LLVMContext.h"
90 #include "llvm/IR/Metadata.h"
91 #include "llvm/IR/Module.h"
93 #include "llvm/IR/PassManager.h"
94 #include "llvm/IR/Statepoint.h"
95 #include "llvm/IR/Type.h"
96 #include "llvm/IR/Use.h"
97 #include "llvm/IR/User.h"
98 #include "llvm/IR/Value.h"
99 #include "llvm/Pass.h"
101 #include "llvm/Support/Casting.h"
103 #include "llvm/Support/Debug.h"
104 #include "llvm/Support/ErrorHandling.h"
105 #include "llvm/Support/MathExtras.h"
106 #include "llvm/Support/raw_ostream.h"
107 #include <algorithm>
108 #include <cassert>
109 #include <cstdint>
110 #include <memory>
111 #include <string>
112 #include <utility>
113 
114 using namespace llvm;
115 
116 namespace llvm {
117 
118 struct VerifierSupport {
119  raw_ostream *OS;
120  const Module &M;
121  ModuleSlotTracker MST;
122  const DataLayout &DL;
123  LLVMContext &Context;
124 
125  /// Track the brokenness of the module while recursively visiting.
126  bool Broken = false;
127  /// Broken debug info can be "recovered" from by stripping the debug info.
128  bool BrokenDebugInfo = false;
129  /// Whether to treat broken debug info as an error.
130  bool TreatBrokenDebugInfoAsError = true;
131 
132  explicit VerifierSupport(raw_ostream *OS, const Module &M)
133  : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}
134 
135 private:
136  void Write(const Module *M) {
137  *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
138  }
139 
140  void Write(const Value *V) {
141  if (V)
142  Write(*V);
143  }
144 
145  void Write(const Value &V) {
146  if (isa<Instruction>(V)) {
147  V.print(*OS, MST);
148  *OS << '\n';
149  } else {
150  V.printAsOperand(*OS, true, MST);
151  *OS << '\n';
152  }
153  }
154 
155  void Write(const Metadata *MD) {
156  if (!MD)
157  return;
158  MD->print(*OS, MST, &M);
159  *OS << '\n';
160  }
161 
162  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
163  Write(MD.get());
164  }
165 
166  void Write(const NamedMDNode *NMD) {
167  if (!NMD)
168  return;
169  NMD->print(*OS, MST);
170  *OS << '\n';
171  }
172 
173  void Write(Type *T) {
174  if (!T)
175  return;
176  *OS << ' ' << *T;
177  }
178 
179  void Write(const Comdat *C) {
180  if (!C)
181  return;
182  *OS << *C;
183  }
184 
185  void Write(const APInt *AI) {
186  if (!AI)
187  return;
188  *OS << *AI << '\n';
189  }
190 
191  void Write(const unsigned i) { *OS << i << '\n'; }
192 
193  template <typename T> void Write(ArrayRef<T> Vs) {
194  for (const T &V : Vs)
195  Write(V);
196  }
197 
198  template <typename T1, typename... Ts>
199  void WriteTs(const T1 &V1, const Ts &... Vs) {
200  Write(V1);
201  WriteTs(Vs...);
202  }
203 
204  template <typename... Ts> void WriteTs() {}
205 
206 public:
207  /// A check failed, so print out the condition and the message.
208  ///
209  /// This provides a nice place to put a breakpoint if you want to see why
210  /// something is not correct.
211  void CheckFailed(const Twine &Message) {
212  if (OS)
213  *OS << Message << '\n';
214  Broken = true;
215  }
216 
217  /// A check failed (with values to print).
218  ///
219  /// This calls the Message-only version so that the above is easier to set a
220  /// breakpoint on.
221  template <typename T1, typename... Ts>
222  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
223  CheckFailed(Message);
224  if (OS)
225  WriteTs(V1, Vs...);
226  }
227 
228  /// A debug info check failed.
229  void DebugInfoCheckFailed(const Twine &Message) {
230  if (OS)
231  *OS << Message << '\n';
232  Broken |= TreatBrokenDebugInfoAsError;
233  BrokenDebugInfo = true;
234  }
235 
236  /// A debug info check failed (with values to print).
237  template <typename T1, typename... Ts>
238  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
239  const Ts &... Vs) {
240  DebugInfoCheckFailed(Message);
241  if (OS)
242  WriteTs(V1, Vs...);
243  }
244 };
245 
246 } // namespace llvm
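
// How callers typically consume the two failure flags above (a minimal
// sketch, assuming the caller owns the module): debug info breakage can be
// "recovered" from by stripping the debug info, whereas broken IR cannot.
static bool exampleVerifyOrStripDebugInfo(Module &M) {
  bool BrokenDebugInfo = false;
  // With the BrokenDebugInfo out-parameter, verifyModule() only returns true
  // for breakage in the IR itself.
  if (verifyModule(M, &errs(), &BrokenDebugInfo))
    return false; // Malformed IR; nothing to salvage.
  if (BrokenDebugInfo)
    StripDebugInfo(M); // Drop the bad debug info, keep the IR.
  return true;
}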
247 
248 namespace {
249 
250 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
251  friend class InstVisitor<Verifier>;
252 
253  DominatorTree DT;
254 
255  /// When verifying a basic block, keep track of all of the
256  /// instructions we have seen so far.
257  ///
258  /// This allows us to do efficient dominance checks for the case when an
259  /// instruction has an operand that is an instruction in the same block.
260  SmallPtrSet<Instruction *, 16> InstsInThisBlock;
261 
262  /// Keep track of the metadata nodes that have been checked already.
263  SmallPtrSet<const Metadata *, 32> MDNodes;
264 
265  /// Keep track which DISubprogram is attached to which function.
266  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
267 
268  /// Track all DICompileUnits visited.
269  SmallPtrSet<const Metadata *, 2> CUVisited;
270 
271  /// The result type for a landingpad.
272  Type *LandingPadResultTy;
273 
274  /// Whether we've seen a call to @llvm.localescape in this function
275  /// already.
276  bool SawFrameEscape;
277 
278  /// Whether the current function has a DISubprogram attached to it.
279  bool HasDebugInfo = false;
280 
281  /// Whether source was present on the first DIFile encountered in each CU.
282  DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
283 
284  /// Stores the count of how many objects were passed to llvm.localescape for a
285  /// given function and the largest index passed to llvm.localrecover.
286  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
287 
288  // Maps catchswitches and cleanuppads that unwind to siblings to the
289  // terminators that indicate the unwind, used to detect cycles therein.
290  MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
291 
292  /// Cache of constants visited in search of ConstantExprs.
293  SmallPtrSet<const Constant *, 32> ConstantExprVisited;
294 
295  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
296  SmallVector<const Function *, 4> DeoptimizeDeclarations;
297 
298  // Verify that this GlobalValue is only used in this module.
299  // This map is used to avoid visiting uses twice. We can arrive at a user
300  // more than once if it has multiple operands. In particular, for very large
301  // constant expressions, we can arrive at a particular user many times.
302  SmallPtrSet<const Value *, 32> GlobalValueVisited;
303 
304  // Keeps track of duplicate function argument debug info.
305  SmallVector<const DILocalVariable *, 16> DebugFnArgs;
306 
307  TBAAVerifier TBAAVerifyHelper;
308 
309  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
310 
311 public:
312  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
313  const Module &M)
314  : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
315  SawFrameEscape(false), TBAAVerifyHelper(this) {
316  TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
317  }
318 
319  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
320 
321  bool verify(const Function &F) {
322  assert(F.getParent() == &M &&
323  "An instance of this class only works with a specific module!");
324 
325  // First ensure the function is well-enough formed to compute dominance
326  // information, and directly compute a dominance tree. We don't rely on the
327  // pass manager to provide this as it isolates us from a potentially
328  // out-of-date dominator tree and makes it significantly more complex to run
329  // this code outside of a pass manager.
330  // FIXME: It's really gross that we have to cast away constness here.
331  if (!F.empty())
332  DT.recalculate(const_cast<Function &>(F));
333 
334  for (const BasicBlock &BB : F) {
335  if (!BB.empty() && BB.back().isTerminator())
336  continue;
337 
338  if (OS) {
339  *OS << "Basic Block in function '" << F.getName()
340  << "' does not have terminator!\n";
341  BB.printAsOperand(*OS, true, MST);
342  *OS << "\n";
343  }
344  return false;
345  }
346 
347  Broken = false;
348  // FIXME: We strip const here because the inst visitor strips const.
349  visit(const_cast<Function &>(F));
350  verifySiblingFuncletUnwinds();
351  InstsInThisBlock.clear();
352  DebugFnArgs.clear();
353  LandingPadResultTy = nullptr;
354  SawFrameEscape = false;
355  SiblingFuncletInfo.clear();
356 
357  return !Broken;
358  }
359 
360  /// Verify the module that this instance of \c Verifier was initialized with.
361  bool verify() {
362  Broken = false;
363 
364  // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
365  for (const Function &F : M)
366  if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
367  DeoptimizeDeclarations.push_back(&F);
368 
369  // Now that we've visited every function, verify that we never asked to
370  // recover a frame index that wasn't escaped.
371  verifyFrameRecoverIndices();
372  for (const GlobalVariable &GV : M.globals())
373  visitGlobalVariable(GV);
374 
375  for (const GlobalAlias &GA : M.aliases())
376  visitGlobalAlias(GA);
377 
378  for (const NamedMDNode &NMD : M.named_metadata())
379  visitNamedMDNode(NMD);
380 
381  for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
382  visitComdat(SMEC.getValue());
383 
384  visitModuleFlags(M);
385  visitModuleIdents(M);
386  visitModuleCommandLines(M);
387 
388  verifyCompileUnits();
389 
390  verifyDeoptimizeCallingConvs();
391  DISubprogramAttachments.clear();
392  return !Broken;
393  }
394 
395 private:
396  // Verification methods...
397  void visitGlobalValue(const GlobalValue &GV);
398  void visitGlobalVariable(const GlobalVariable &GV);
399  void visitGlobalAlias(const GlobalAlias &GA);
400  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
401  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
402  const GlobalAlias &A, const Constant &C);
403  void visitNamedMDNode(const NamedMDNode &NMD);
404  void visitMDNode(const MDNode &MD);
405  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
406  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
407  void visitComdat(const Comdat &C);
408  void visitModuleIdents(const Module &M);
409  void visitModuleCommandLines(const Module &M);
410  void visitModuleFlags(const Module &M);
411  void visitModuleFlag(const MDNode *Op,
412  DenseMap<const MDString *, const MDNode *> &SeenIDs,
413  SmallVectorImpl<const MDNode *> &Requirements);
414  void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
415  void visitFunction(const Function &F);
416  void visitBasicBlock(BasicBlock &BB);
417  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
418  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
419 
420  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
421 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
422 #include "llvm/IR/Metadata.def"
423  void visitDIScope(const DIScope &N);
424  void visitDIVariable(const DIVariable &N);
425  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
426  void visitDITemplateParameter(const DITemplateParameter &N);
427 
428  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
429 
430  // InstVisitor overrides...
431  using InstVisitor<Verifier>::visit;
432  void visit(Instruction &I);
433 
434  void visitTruncInst(TruncInst &I);
435  void visitZExtInst(ZExtInst &I);
436  void visitSExtInst(SExtInst &I);
437  void visitFPTruncInst(FPTruncInst &I);
438  void visitFPExtInst(FPExtInst &I);
439  void visitFPToUIInst(FPToUIInst &I);
440  void visitFPToSIInst(FPToSIInst &I);
441  void visitUIToFPInst(UIToFPInst &I);
442  void visitSIToFPInst(SIToFPInst &I);
443  void visitIntToPtrInst(IntToPtrInst &I);
444  void visitPtrToIntInst(PtrToIntInst &I);
445  void visitBitCastInst(BitCastInst &I);
446  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
447  void visitPHINode(PHINode &PN);
448  void visitCallBase(CallBase &Call);
449  void visitUnaryOperator(UnaryOperator &U);
450  void visitBinaryOperator(BinaryOperator &B);
451  void visitICmpInst(ICmpInst &IC);
452  void visitFCmpInst(FCmpInst &FC);
453  void visitExtractElementInst(ExtractElementInst &EI);
454  void visitInsertElementInst(InsertElementInst &EI);
455  void visitShuffleVectorInst(ShuffleVectorInst &EI);
456  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
457  void visitCallInst(CallInst &CI);
458  void visitInvokeInst(InvokeInst &II);
459  void visitGetElementPtrInst(GetElementPtrInst &GEP);
460  void visitLoadInst(LoadInst &LI);
461  void visitStoreInst(StoreInst &SI);
462  void verifyDominatesUse(Instruction &I, unsigned i);
463  void visitInstruction(Instruction &I);
464  void visitTerminator(Instruction &I);
465  void visitBranchInst(BranchInst &BI);
466  void visitReturnInst(ReturnInst &RI);
467  void visitSwitchInst(SwitchInst &SI);
468  void visitIndirectBrInst(IndirectBrInst &BI);
469  void visitSelectInst(SelectInst &SI);
470  void visitUserOp1(Instruction &I);
471  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
472  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
473  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
474  void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
475  void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
476  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
477  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
478  void visitFenceInst(FenceInst &FI);
479  void visitAllocaInst(AllocaInst &AI);
480  void visitExtractValueInst(ExtractValueInst &EVI);
481  void visitInsertValueInst(InsertValueInst &IVI);
482  void visitEHPadPredecessors(Instruction &I);
483  void visitLandingPadInst(LandingPadInst &LPI);
484  void visitResumeInst(ResumeInst &RI);
485  void visitCatchPadInst(CatchPadInst &CPI);
486  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
487  void visitCleanupPadInst(CleanupPadInst &CPI);
488  void visitFuncletPadInst(FuncletPadInst &FPI);
489  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
490  void visitCleanupReturnInst(CleanupReturnInst &CRI);
491 
492  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
493  void verifySwiftErrorValue(const Value *SwiftErrorVal);
494  void verifyMustTailCall(CallInst &CI);
495  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
496  unsigned ArgNo, std::string &Suffix);
497  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
498  void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
499  const Value *V);
500  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
501  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
502  const Value *V);
503  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
504 
505  void visitConstantExprsRecursively(const Constant *EntryC);
506  void visitConstantExpr(const ConstantExpr *CE);
507  void verifyStatepoint(const CallBase &Call);
508  void verifyFrameRecoverIndices();
509  void verifySiblingFuncletUnwinds();
510 
511  void verifyFragmentExpression(const DbgVariableIntrinsic &I);
512  template <typename ValueOrMetadata>
513  void verifyFragmentExpression(const DIVariable &V,
514  DIExpression::FragmentInfo Fragment,
515  ValueOrMetadata *Desc);
516  void verifyFnArgs(const DbgVariableIntrinsic &I);
517 
518  /// Module-level debug info verification...
519  void verifyCompileUnits();
520 
521  /// Module-level verification that all @llvm.experimental.deoptimize
522  /// declarations share the same calling convention.
523  void verifyDeoptimizeCallingConvs();
524 
525  /// Verify all-or-nothing property of DIFile source attribute within a CU.
526  void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
527 };
528 
529 } // end anonymous namespace
530 
531 /// We know that cond should be true, if not print an error message.
532 #define Assert(C, ...) \
533  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
534 
535 /// We know that a debug info condition should be true, if not print
536 /// an error message.
537 #define AssertDI(C, ...) \
538  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
539 
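// Usage pattern for the two macros (a sketch with a hypothetical checker
// rather than one of the verifier's real visit methods): a failing Assert
// records the problem through CheckFailed() and returns from the enclosing
// method, so checks that depend on the failed condition never run.
namespace {
struct ExampleChecker : VerifierSupport {
  ExampleChecker(raw_ostream *OS, const Module &M) : VerifierSupport(OS, M) {}

  void checkReturn(const ReturnInst &RI) {
    const Function *F = RI.getFunction();
    Assert(F, "return instruction is not embedded in a function", &RI);
    // Only reached when the previous condition held.
    Assert(!RI.getReturnValue() ||
               RI.getReturnValue()->getType() == F->getReturnType(),
           "return value type does not match function return type", &RI);
  }
};
} // end anonymous namespace
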
540 void Verifier::visit(Instruction &I) {
541  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
542  Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
543  InstVisitor<Verifier>::visit(I);
544 }
545 
546 // Helper to recursively iterate over indirect users. By
547 // returning false, the callback can ask to stop recursing
548 // further.
549 static void forEachUser(const Value *User,
550  SmallPtrSetImpl<const Value *> &Visited,
551  llvm::function_ref<bool(const Value *)> Callback) {
552  if (!Visited.insert(User).second)
553  return;
554  for (const Value *TheNextUser : User->materialized_users())
555  if (Callback(TheNextUser))
556  forEachUser(TheNextUser, Visited, Callback);
557 }
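
// Example use of the helper above (a minimal sketch): count how many of a
// global's transitive users are instructions, looking through constant
// expressions but not past instructions themselves. A value reachable along
// several different use chains may be counted more than once.
static unsigned countInstructionUsers(const GlobalValue &GV) {
  SmallPtrSet<const Value *, 32> Visited;
  unsigned NumInstUsers = 0;
  forEachUser(&GV, Visited, [&](const Value *U) {
    if (isa<Instruction>(U)) {
      ++NumInstUsers;
      return false; // Stop recursing below an instruction.
    }
    return true; // Keep walking the users of constants/operators.
  });
  return NumInstUsers;
}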
558 
559 void Verifier::visitGlobalValue(const GlobalValue &GV) {
561  "Global is external, but doesn't have external or weak linkage!", &GV);
562 
564  "huge alignment values are unsupported", &GV);
565  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
566  "Only global variables can have appending linkage!", &GV);
567 
568  if (GV.hasAppendingLinkage()) {
569  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
570  Assert(GVar && GVar->getValueType()->isArrayTy(),
571  "Only global arrays can have appending linkage!", GVar);
572  }
573 
574  if (GV.isDeclarationForLinker())
575  Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
576 
577  if (GV.hasDLLImportStorageClass()) {
578  Assert(!GV.isDSOLocal(),
579  "GlobalValue with DLLImport Storage is dso_local!", &GV);
580 
581  Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
583  "Global is marked as dllimport, but not external", &GV);
584  }
585 
586  if (GV.hasLocalLinkage())
587  Assert(GV.isDSOLocal(),
588  "GlobalValue with private or internal linkage must be dso_local!",
589  &GV);
590 
591  if (!GV.hasDefaultVisibility() && !GV.hasExternalWeakLinkage())
592  Assert(GV.isDSOLocal(),
593  "GlobalValue with non default visibility must be dso_local!", &GV);
594 
595  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
596  if (const Instruction *I = dyn_cast<Instruction>(V)) {
597  if (!I->getParent() || !I->getParent()->getParent())
598  CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
599  I);
600  else if (I->getParent()->getParent()->getParent() != &M)
601  CheckFailed("Global is referenced in a different module!", &GV, &M, I,
602  I->getParent()->getParent(),
603  I->getParent()->getParent()->getParent());
604  return false;
605  } else if (const Function *F = dyn_cast<Function>(V)) {
606  if (F->getParent() != &M)
607  CheckFailed("Global is used by function in a different module", &GV, &M,
608  F, F->getParent());
609  return false;
610  }
611  return true;
612  });
613 }
614 
615 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
616  if (GV.hasInitializer()) {
617  Assert(GV.getInitializer()->getType() == GV.getValueType(),
618  "Global variable initializer type does not match global "
619  "variable type!",
620  &GV);
621  // If the global has common linkage, it must have a zero initializer and
622  // cannot be constant.
623  if (GV.hasCommonLinkage()) {
625  "'common' global must have a zero initializer!", &GV);
626  Assert(!GV.isConstant(), "'common' global may not be marked constant!",
627  &GV);
628  Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
629  }
630  }
631 
632  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
633  GV.getName() == "llvm.global_dtors")) {
635  "invalid linkage for intrinsic global variable", &GV);
636  // Don't worry about emitting an error for it not being an array,
637  // visitGlobalValue will complain on appending non-array.
638  if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
639  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
640  PointerType *FuncPtrTy =
641  FunctionType::get(Type::getVoidTy(Context), false)->
642  getPointerTo(DL.getProgramAddressSpace());
643  // FIXME: Reject the 2-field form in LLVM 4.0.
644  Assert(STy &&
645  (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
646  STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
647  STy->getTypeAtIndex(1) == FuncPtrTy,
648  "wrong type for intrinsic global variable", &GV);
649  if (STy->getNumElements() == 3) {
650  Type *ETy = STy->getTypeAtIndex(2);
651  Assert(ETy->isPointerTy() &&
652  cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
653  "wrong type for intrinsic global variable", &GV);
654  }
655  }
656  }
657 
658  if (GV.hasName() && (GV.getName() == "llvm.used" ||
659  GV.getName() == "llvm.compiler.used")) {
661  "invalid linkage for intrinsic global variable", &GV);
662  Type *GVType = GV.getValueType();
663  if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
664  PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
665  Assert(PTy, "wrong type for intrinsic global variable", &GV);
666  if (GV.hasInitializer()) {
667  const Constant *Init = GV.getInitializer();
668  const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
669  Assert(InitArray, "wrong initializer for intrinsic global variable",
670  Init);
671  for (Value *Op : InitArray->operands()) {
672  Value *V = Op->stripPointerCastsNoFollowAliases();
673  Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
674  isa<GlobalAlias>(V),
675  "invalid llvm.used member", V);
676  Assert(V->hasName(), "members of llvm.used must be named", V);
677  }
678  }
679  }
680  }
681 
682  // Visit any debug info attachments.
683  SmallVector<MDNode *, 1> MDs;
684  GV.getMetadata(LLVMContext::MD_dbg, MDs);
685  for (auto *MD : MDs) {
686  if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
687  visitDIGlobalVariableExpression(*GVE);
688  else
689  AssertDI(false, "!dbg attachment of global variable must be a "
690  "DIGlobalVariableExpression");
691  }
692 
693  if (!GV.hasInitializer()) {
694  visitGlobalValue(GV);
695  return;
696  }
697 
698  // Walk any aggregate initializers looking for bitcasts between address spaces
699  visitConstantExprsRecursively(GV.getInitializer());
700 
701  visitGlobalValue(GV);
702 }
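
// A small sketch of how the 'common' linkage rules above can be violated
// from the C++ API (the global's name and value are illustrative only):
static void exampleBrokenCommonGlobal(Module &M) {
  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  // A common global with a non-zero initializer triggers
  // "'common' global must have a zero initializer!".
  new GlobalVariable(M, Int32Ty, /*isConstant=*/false,
                     GlobalValue::CommonLinkage,
                     ConstantInt::get(Int32Ty, 42), "bad_common");
  // verifyModule(M, &errs()) would now return true (the module is broken).
}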
703 
704 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
705  SmallPtrSet<const GlobalAlias*, 4> Visited;
706  Visited.insert(&GA);
707  visitAliaseeSubExpr(Visited, GA, C);
708 }
709 
710 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
711  const GlobalAlias &GA, const Constant &C) {
712  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
713  Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
714  &GA);
715 
716  if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
717  Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
718 
719  Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
720  &GA);
721  } else {
722  // Only continue verifying subexpressions of GlobalAliases.
723  // Do not recurse into global initializers.
724  return;
725  }
726  }
727 
728  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
729  visitConstantExprsRecursively(CE);
730 
731  for (const Use &U : C.operands()) {
732  Value *V = &*U;
733  if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
734  visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
735  else if (const auto *C2 = dyn_cast<Constant>(V))
736  visitAliaseeSubExpr(Visited, GA, *C2);
737  }
738 }
739 
740 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
742  "Alias should have private, internal, linkonce, weak, linkonce_odr, "
743  "weak_odr, or external linkage!",
744  &GA);
745  const Constant *Aliasee = GA.getAliasee();
746  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
747  Assert(GA.getType() == Aliasee->getType(),
748  "Alias and aliasee types should match!", &GA);
749 
750  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
751  "Aliasee should be either GlobalValue or ConstantExpr", &GA);
752 
753  visitAliaseeSubExpr(GA, *Aliasee);
754 
755  visitGlobalValue(GA);
756 }
757 
758 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
759  // There used to be various other llvm.dbg.* nodes, but we don't support
760  // upgrading them and we want to reserve the namespace for future uses.
761  if (NMD.getName().startswith("llvm.dbg."))
762  AssertDI(NMD.getName() == "llvm.dbg.cu",
763  "unrecognized named metadata node in the llvm.dbg namespace",
764  &NMD);
765  for (const MDNode *MD : NMD.operands()) {
766  if (NMD.getName() == "llvm.dbg.cu")
767  AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
768 
769  if (!MD)
770  continue;
771 
772  visitMDNode(*MD);
773  }
774 }
775 
776 void Verifier::visitMDNode(const MDNode &MD) {
777  // Only visit each node once. Metadata can be mutually recursive, so this
778  // avoids infinite recursion here, as well as being an optimization.
779  if (!MDNodes.insert(&MD).second)
780  return;
781 
782  switch (MD.getMetadataID()) {
783  default:
784  llvm_unreachable("Invalid MDNode subclass");
785  case Metadata::MDTupleKind:
786  break;
787 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
788  case Metadata::CLASS##Kind: \
789  visit##CLASS(cast<CLASS>(MD)); \
790  break;
791 #include "llvm/IR/Metadata.def"
792  }
793 
794  for (const Metadata *Op : MD.operands()) {
795  if (!Op)
796  continue;
797  Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
798  &MD, Op);
799  if (auto *N = dyn_cast<MDNode>(Op)) {
800  visitMDNode(*N);
801  continue;
802  }
803  if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
804  visitValueAsMetadata(*V, nullptr);
805  continue;
806  }
807  }
808 
809  // Check these last, so we diagnose problems in operands first.
810  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
811  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
812 }
813 
814 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
815  Assert(MD.getValue(), "Expected valid value", &MD);
816  Assert(!MD.getValue()->getType()->isMetadataTy(),
817  "Unexpected metadata round-trip through values", &MD, MD.getValue());
818 
819  auto *L = dyn_cast<LocalAsMetadata>(&MD);
820  if (!L)
821  return;
822 
823  Assert(F, "function-local metadata used outside a function", L);
824 
825  // If this was an instruction, bb, or argument, verify that it is in the
826  // function that we expect.
827  Function *ActualF = nullptr;
828  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
829  Assert(I->getParent(), "function-local metadata not in basic block", L, I);
830  ActualF = I->getParent()->getParent();
831  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
832  ActualF = BB->getParent();
833  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
834  ActualF = A->getParent();
835  assert(ActualF && "Unimplemented function local metadata case!");
836 
837  Assert(ActualF == F, "function-local metadata used in wrong function", L);
838 }
839 
840 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
841  Metadata *MD = MDV.getMetadata();
842  if (auto *N = dyn_cast<MDNode>(MD)) {
843  visitMDNode(*N);
844  return;
845  }
846 
847  // Only visit each node once. Metadata can be mutually recursive, so this
848  // avoids infinite recursion here, as well as being an optimization.
849  if (!MDNodes.insert(MD).second)
850  return;
851 
852  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
853  visitValueAsMetadata(*V, F);
854 }
855 
856 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
857 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
858 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
859 
860 void Verifier::visitDILocation(const DILocation &N) {
861  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
862  "location requires a valid scope", &N, N.getRawScope());
863  if (auto *IA = N.getRawInlinedAt())
864  AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
865  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
866  AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
867 }
868 
869 void Verifier::visitGenericDINode(const GenericDINode &N) {
870  AssertDI(N.getTag(), "invalid tag", &N);
871 }
872 
873 void Verifier::visitDIScope(const DIScope &N) {
874  if (auto *F = N.getRawFile())
875  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
876 }
877 
878 void Verifier::visitDISubrange(const DISubrange &N) {
879  AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
880  auto Count = N.getCount();
881  AssertDI(Count, "Count must either be a signed constant or a DIVariable",
882  &N);
883  AssertDI(!Count.is<ConstantInt*>() ||
884  Count.get<ConstantInt*>()->getSExtValue() >= -1,
885  "invalid subrange count", &N);
886 }
887 
888 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
889  AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
890 }
891 
892 void Verifier::visitDIBasicType(const DIBasicType &N) {
893  AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
894  N.getTag() == dwarf::DW_TAG_unspecified_type,
895  "invalid tag", &N);
896  AssertDI(!(N.isBigEndian() && N.isLittleEndian()) ,
897  "has conflicting flags", &N);
898 }
899 
900 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
901  // Common scope checks.
902  visitDIScope(N);
903 
904  AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
905  N.getTag() == dwarf::DW_TAG_pointer_type ||
906  N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
907  N.getTag() == dwarf::DW_TAG_reference_type ||
908  N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
909  N.getTag() == dwarf::DW_TAG_const_type ||
910  N.getTag() == dwarf::DW_TAG_volatile_type ||
911  N.getTag() == dwarf::DW_TAG_restrict_type ||
912  N.getTag() == dwarf::DW_TAG_atomic_type ||
913  N.getTag() == dwarf::DW_TAG_member ||
914  N.getTag() == dwarf::DW_TAG_inheritance ||
915  N.getTag() == dwarf::DW_TAG_friend,
916  "invalid tag", &N);
917  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
918  AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
919  N.getRawExtraData());
920  }
921 
922  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
923  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
924  N.getRawBaseType());
925 
926  if (N.getDWARFAddressSpace()) {
927  AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
928  N.getTag() == dwarf::DW_TAG_reference_type,
929  "DWARF address space only applies to pointer or reference types",
930  &N);
931  }
932 }
933 
934 /// Detect mutually exclusive flags.
935 static bool hasConflictingReferenceFlags(unsigned Flags) {
936  return ((Flags & DINode::FlagLValueReference) &&
937  (Flags & DINode::FlagRValueReference)) ||
938  ((Flags & DINode::FlagTypePassByValue) &&
939  (Flags & DINode::FlagTypePassByReference));
940 }
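
// For instance (a sketch), a node carrying both reference-kind flags is
// reported as conflicting and rejected by the DIType checks below:
static bool exampleConflictingDIFlags() {
  unsigned Flags = DINode::FlagLValueReference | DINode::FlagRValueReference;
  return hasConflictingReferenceFlags(Flags); // true
}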
941 
942 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
943  auto *Params = dyn_cast<MDTuple>(&RawParams);
944  AssertDI(Params, "invalid template params", &N, &RawParams);
945  for (Metadata *Op : Params->operands()) {
946  AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
947  &N, Params, Op);
948  }
949 }
950 
951 void Verifier::visitDICompositeType(const DICompositeType &N) {
952  // Common scope checks.
953  visitDIScope(N);
954 
955  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
956  N.getTag() == dwarf::DW_TAG_structure_type ||
957  N.getTag() == dwarf::DW_TAG_union_type ||
958  N.getTag() == dwarf::DW_TAG_enumeration_type ||
959  N.getTag() == dwarf::DW_TAG_class_type ||
960  N.getTag() == dwarf::DW_TAG_variant_part,
961  "invalid tag", &N);
962 
963  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
964  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
965  N.getRawBaseType());
966 
967  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
968  "invalid composite elements", &N, N.getRawElements());
969  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
970  N.getRawVTableHolder());
972  "invalid reference flags", &N);
973 
974  if (N.isVector()) {
975  const DINodeArray Elements = N.getElements();
976  AssertDI(Elements.size() == 1 &&
977  Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
978  "invalid vector, expected one element of type subrange", &N);
979  }
980 
981  if (auto *Params = N.getRawTemplateParams())
982  visitTemplateParams(N, *Params);
983 
984  if (N.getTag() == dwarf::DW_TAG_class_type ||
985  N.getTag() == dwarf::DW_TAG_union_type) {
986  AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
987  "class/union requires a filename", &N, N.getFile());
988  }
989 
990  if (auto *D = N.getRawDiscriminator()) {
991  AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
992  "discriminator can only appear on variant part");
993  }
994 }
995 
996 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
997  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
998  if (auto *Types = N.getRawTypeArray()) {
999  AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1000  for (Metadata *Ty : N.getTypeArray()->operands()) {
1001  AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1002  }
1003  }
1005  "invalid reference flags", &N);
1006 }
1007 
1008 void Verifier::visitDIFile(const DIFile &N) {
1009  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1010  Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1011  if (Checksum) {
1012  AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1013  "invalid checksum kind", &N);
1014  size_t Size;
1015  switch (Checksum->Kind) {
1016  case DIFile::CSK_MD5:
1017  Size = 32;
1018  break;
1019  case DIFile::CSK_SHA1:
1020  Size = 40;
1021  break;
1022  }
1023  AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1024  AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1025  "invalid checksum", &N);
1026  }
1027 }
1028 
1029 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1030  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1031  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1032 
1033  // Don't bother verifying the compilation directory or producer string
1034  // as those could be empty.
1035  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1036  N.getRawFile());
1037  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1038  N.getFile());
1039 
1040  verifySourceDebugInfo(N, *N.getFile());
1041 
1043  "invalid emission kind", &N);
1044 
1045  if (auto *Array = N.getRawEnumTypes()) {
1046  AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1047  for (Metadata *Op : N.getEnumTypes()->operands()) {
1048  auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1049  AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1050  "invalid enum type", &N, N.getEnumTypes(), Op);
1051  }
1052  }
1053  if (auto *Array = N.getRawRetainedTypes()) {
1054  AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1055  for (Metadata *Op : N.getRetainedTypes()->operands()) {
1056  AssertDI(Op && (isa<DIType>(Op) ||
1057  (isa<DISubprogram>(Op) &&
1058  !cast<DISubprogram>(Op)->isDefinition())),
1059  "invalid retained type", &N, Op);
1060  }
1061  }
1062  if (auto *Array = N.getRawGlobalVariables()) {
1063  AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1064  for (Metadata *Op : N.getGlobalVariables()->operands()) {
1065  AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1066  "invalid global variable ref", &N, Op);
1067  }
1068  }
1069  if (auto *Array = N.getRawImportedEntities()) {
1070  AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1071  for (Metadata *Op : N.getImportedEntities()->operands()) {
1072  AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1073  &N, Op);
1074  }
1075  }
1076  if (auto *Array = N.getRawMacros()) {
1077  AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1078  for (Metadata *Op : N.getMacros()->operands()) {
1079  AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1080  }
1081  }
1082  CUVisited.insert(&N);
1083 }
1084 
1085 void Verifier::visitDISubprogram(const DISubprogram &N) {
1086  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1087  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1088  if (auto *F = N.getRawFile())
1089  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1090  else
1091  AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1092  if (auto *T = N.getRawType())
1093  AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1094  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1095  N.getRawContainingType());
1096  if (auto *Params = N.getRawTemplateParams())
1097  visitTemplateParams(N, *Params);
1098  if (auto *S = N.getRawDeclaration())
1099  AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1100  "invalid subprogram declaration", &N, S);
1101  if (auto *RawNode = N.getRawRetainedNodes()) {
1102  auto *Node = dyn_cast<MDTuple>(RawNode);
1103  AssertDI(Node, "invalid retained nodes list", &N, RawNode);
1104  for (Metadata *Op : Node->operands()) {
1105  AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
1106  "invalid retained nodes, expected DILocalVariable or DILabel",
1107  &N, Node, Op);
1108  }
1109  }
1110  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1111  "invalid reference flags", &N);
1112 
1113  auto *Unit = N.getRawUnit();
1114  if (N.isDefinition()) {
1115  // Subprogram definitions (not part of the type hierarchy).
1116  AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1117  AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1118  AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1119  if (N.getFile())
1120  verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1121  } else {
1122  // Subprogram declarations (part of the type hierarchy).
1123  AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1124  }
1125 
1126  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1127  auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1128  AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1129  for (Metadata *Op : ThrownTypes->operands())
1130  AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1131  Op);
1132  }
1133 
1134  if (N.areAllCallsDescribed())
1135  AssertDI(N.isDefinition(),
1136  "DIFlagAllCallsDescribed must be attached to a definition");
1137 }
1138 
1139 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1140  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1141  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1142  "invalid local scope", &N, N.getRawScope());
1143  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1144  AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1145 }
1146 
1147 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1148  visitDILexicalBlockBase(N);
1149 
1150  AssertDI(N.getLine() || !N.getColumn(),
1151  "cannot have column info without line info", &N);
1152 }
1153 
1154 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1155  visitDILexicalBlockBase(N);
1156 }
1157 
1158 void Verifier::visitDINamespace(const DINamespace &N) {
1159  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1160  if (auto *S = N.getRawScope())
1161  AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1162 }
1163 
1164 void Verifier::visitDIMacro(const DIMacro &N) {
1167  "invalid macinfo type", &N);
1168  AssertDI(!N.getName().empty(), "anonymous macro", &N);
1169  if (!N.getValue().empty()) {
1170  assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1171  }
1172 }
1173 
1174 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1176  "invalid macinfo type", &N);
1177  if (auto *F = N.getRawFile())
1178  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1179 
1180  if (auto *Array = N.getRawElements()) {
1181  AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1182  for (Metadata *Op : N.getElements()->operands()) {
1183  AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1184  }
1185  }
1186 }
1187 
1188 void Verifier::visitDIModule(const DIModule &N) {
1189  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1190  AssertDI(!N.getName().empty(), "anonymous module", &N);
1191 }
1192 
1193 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1194  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1195 }
1196 
1197 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1198  visitDITemplateParameter(N);
1199 
1200  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1201  &N);
1202 }
1203 
1204 void Verifier::visitDITemplateValueParameter(
1205  const DITemplateValueParameter &N) {
1206  visitDITemplateParameter(N);
1207 
1208  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1209  N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1210  N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1211  "invalid tag", &N);
1212 }
1213 
1214 void Verifier::visitDIVariable(const DIVariable &N) {
1215  if (auto *S = N.getRawScope())
1216  AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1217  if (auto *F = N.getRawFile())
1218  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1219 }
1220 
1221 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1222  // Checks common to all variables.
1223  visitDIVariable(N);
1224 
1225  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1226  AssertDI(!N.getName().empty(), "missing global variable name", &N);
1227  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1228  AssertDI(N.getType(), "missing global variable type", &N);
1229  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1230  AssertDI(isa<DIDerivedType>(Member),
1231  "invalid static data member declaration", &N, Member);
1232  }
1233 }
1234 
1235 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1236  // Checks common to all variables.
1237  visitDIVariable(N);
1238 
1239  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1240  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1241  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1242  "local variable requires a valid scope", &N, N.getRawScope());
1243  if (auto Ty = N.getType())
1244  AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1245 }
1246 
1247 void Verifier::visitDILabel(const DILabel &N) {
1248  if (auto *S = N.getRawScope())
1249  AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1250  if (auto *F = N.getRawFile())
1251  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1252 
1253  AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1254  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1255  "label requires a valid scope", &N, N.getRawScope());
1256 }
1257 
1258 void Verifier::visitDIExpression(const DIExpression &N) {
1259  AssertDI(N.isValid(), "invalid expression", &N);
1260 }
1261 
1262 void Verifier::visitDIGlobalVariableExpression(
1263  const DIGlobalVariableExpression &GVE) {
1264  AssertDI(GVE.getVariable(), "missing variable");
1265  if (auto *Var = GVE.getVariable())
1266  visitDIGlobalVariable(*Var);
1267  if (auto *Expr = GVE.getExpression()) {
1268  visitDIExpression(*Expr);
1269  if (auto Fragment = Expr->getFragmentInfo())
1270  verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1271  }
1272 }
1273 
1274 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1275  AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1276  if (auto *T = N.getRawType())
1277  AssertDI(isType(T), "invalid type ref", &N, T);
1278  if (auto *F = N.getRawFile())
1279  AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1280 }
1281 
1282 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1283  AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1284  N.getTag() == dwarf::DW_TAG_imported_declaration,
1285  "invalid tag", &N);
1286  if (auto *S = N.getRawScope())
1287  AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1288  AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1289  N.getRawEntity());
1290 }
1291 
1292 void Verifier::visitComdat(const Comdat &C) {
1293  // The Module is invalid if the GlobalValue has private linkage. Entities
1294  // with private linkage don't have entries in the symbol table.
1295  if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1296  Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1297  GV);
1298 }
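
// A sketch of the situation rejected above (names are illustrative): a
// comdat member with private linkage would need a symbol table entry that
// private symbols do not get.
static void exampleBrokenComdatMember(Module &M) {
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto *GV = new GlobalVariable(M, Int8Ty, /*isConstant=*/false,
                                GlobalValue::PrivateLinkage,
                                Constant::getNullValue(Int8Ty), "cd");
  GV->setComdat(M.getOrInsertComdat("cd"));
  // verifyModule() now reports "comdat global value has private linkage".
}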
1299 
1300 void Verifier::visitModuleIdents(const Module &M) {
1301  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1302  if (!Idents)
1303  return;
1304 
1305  // llvm.ident takes a list of metadata entries. Each entry has only one string.
1306  // Scan each llvm.ident entry and make sure that this requirement is met.
1307  for (const MDNode *N : Idents->operands()) {
1308  Assert(N->getNumOperands() == 1,
1309  "incorrect number of operands in llvm.ident metadata", N);
1310  Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1311  ("invalid value for llvm.ident metadata entry operand"
1312  "(the operand should be a string)"),
1313  N->getOperand(0));
1314  }
1315 }
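
// What a well-formed llvm.ident entry looks like when built through the C++
// API (a sketch; the producer string is arbitrary):
static void exampleAddIdent(Module &M) {
  LLVMContext &Ctx = M.getContext();
  NamedMDNode *Idents = M.getOrInsertNamedMetadata("llvm.ident");
  // Exactly one operand per entry, and that operand must be an MDString.
  Metadata *Producer = MDString::get(Ctx, "example compiler 1.0");
  Idents->addOperand(MDNode::get(Ctx, Producer));
}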
1316 
1317 void Verifier::visitModuleCommandLines(const Module &M) {
1318  const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1319  if (!CommandLines)
1320  return;
1321 
1322  // llvm.commandline takes a list of metadata entries. Each entry has only one
1323  // string. Scan each llvm.commandline entry and make sure that this
1324  // requirement is met.
1325  for (const MDNode *N : CommandLines->operands()) {
1326  Assert(N->getNumOperands() == 1,
1327  "incorrect number of operands in llvm.commandline metadata", N);
1328  Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1329  ("invalid value for llvm.commandline metadata entry operand"
1330  "(the operand should be a string)"),
1331  N->getOperand(0));
1332  }
1333 }
1334 
1335 void Verifier::visitModuleFlags(const Module &M) {
1336  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1337  if (!Flags) return;
1338 
1339  // Scan each flag, and track the flags and requirements.
1340  DenseMap<const MDString*, const MDNode*> SeenIDs;
1341  SmallVector<const MDNode*, 16> Requirements;
1342  for (const MDNode *MDN : Flags->operands())
1343  visitModuleFlag(MDN, SeenIDs, Requirements);
1344 
1345  // Validate that the requirements in the module are valid.
1346  for (const MDNode *Requirement : Requirements) {
1347  const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1348  const Metadata *ReqValue = Requirement->getOperand(1);
1349 
1350  const MDNode *Op = SeenIDs.lookup(Flag);
1351  if (!Op) {
1352  CheckFailed("invalid requirement on flag, flag is not present in module",
1353  Flag);
1354  continue;
1355  }
1356 
1357  if (Op->getOperand(2) != ReqValue) {
1358  CheckFailed(("invalid requirement on flag, "
1359  "flag does not have the required value"),
1360  Flag);
1361  continue;
1362  }
1363  }
1364 }
1365 
1366 void
1367 Verifier::visitModuleFlag(const MDNode *Op,
1368  DenseMap<const MDString *, const MDNode *> &SeenIDs,
1369  SmallVectorImpl<const MDNode *> &Requirements) {
1370  // Each module flag should have three arguments, the merge behavior (a
1371  // constant int), the flag ID (an MDString), and the value.
1372  Assert(Op->getNumOperands() == 3,
1373  "incorrect number of operands in module flag", Op);
1374  Module::ModFlagBehavior MFB;
1375  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1376  Assert(
1377  mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1378  "invalid behavior operand in module flag (expected constant integer)",
1379  Op->getOperand(0));
1380  Assert(false,
1381  "invalid behavior operand in module flag (unexpected constant)",
1382  Op->getOperand(0));
1383  }
1384  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1385  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1386  Op->getOperand(1));
1387 
1388  // Sanity check the values for behaviors with additional requirements.
1389  switch (MFB) {
1390  case Module::Error:
1391  case Module::Warning:
1392  case Module::Override:
1393  // These behavior types accept any value.
1394  break;
1395 
1396  case Module::Max: {
1397  Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1398  "invalid value for 'max' module flag (expected constant integer)",
1399  Op->getOperand(2));
1400  break;
1401  }
1402 
1403  case Module::Require: {
1404  // The value should itself be an MDNode with two operands, a flag ID (an
1405  // MDString), and a value.
1406  MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1407  Assert(Value && Value->getNumOperands() == 2,
1408  "invalid value for 'require' module flag (expected metadata pair)",
1409  Op->getOperand(2));
1410  Assert(isa<MDString>(Value->getOperand(0)),
1411  ("invalid value for 'require' module flag "
1412  "(first value operand should be a string)"),
1413  Value->getOperand(0));
1414 
1415  // Append it to the list of requirements, to check once all module flags are
1416  // scanned.
1417  Requirements.push_back(Value);
1418  break;
1419  }
1420 
1421  case Module::Append:
1422  case Module::AppendUnique: {
1423  // These behavior types require the operand be an MDNode.
1424  Assert(isa<MDNode>(Op->getOperand(2)),
1425  "invalid value for 'append'-type module flag "
1426  "(expected a metadata node)",
1427  Op->getOperand(2));
1428  break;
1429  }
1430  }
1431 
1432  // Unless this is a "requires" flag, check the ID is unique.
1433  if (MFB != Module::Require) {
1434  bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1435  Assert(Inserted,
1436  "module flag identifiers must be unique (or of 'require' type)", ID);
1437  }
1438 
1439  if (ID->getString() == "wchar_size") {
1440  ConstantInt *Value
1441  = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1442  Assert(Value, "wchar_size metadata requires constant integer argument");
1443  }
1444 
1445  if (ID->getString() == "Linker Options") {
1446  // If the llvm.linker.options named metadata exists, we assume that the
1447  // bitcode reader has upgraded the module flag. Otherwise the flag might
1448  // have been created by a client directly.
1449  Assert(M.getNamedMetadata("llvm.linker.options"),
1450  "'Linker Options' named metadata no longer supported");
1451  }
1452 
1453  if (ID->getString() == "CG Profile") {
1454  for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1455  visitModuleFlagCGProfileEntry(MDO);
1456  }
1457 }
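
// Module flags that satisfy the layout checked above are normally created
// with Module::addModuleFlag(), which builds the {behavior, ID, value}
// triple itself (a sketch; the second flag name is illustrative only):
static void exampleAddModuleFlags(Module &M) {
  // An Error-behavior flag with a constant integer value; this also meets
  // the special requirement on "wchar_size" checked above.
  M.addModuleFlag(Module::Error, "wchar_size", 4);
  // Max-behavior flags likewise require a constant integer value.
  M.addModuleFlag(Module::Max, "example-max-flag", 7);
}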
1458 
1459 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1460  auto CheckFunction = [&](const MDOperand &FuncMDO) {
1461  if (!FuncMDO)
1462  return;
1463  auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1464  Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1465  FuncMDO);
1466  };
1467  auto Node = dyn_cast_or_null<MDNode>(MDO);
1468  Assert(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1469  CheckFunction(Node->getOperand(0));
1470  CheckFunction(Node->getOperand(1));
1471  auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1472  Assert(Count && Count->getType()->isIntegerTy(),
1473  "expected an integer constant", Node->getOperand(2));
1474 }
1475 
1476 /// Return true if this attribute kind only applies to functions.
1477 static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1478  switch (Kind) {
1479  case Attribute::NoReturn:
1480  case Attribute::NoCfCheck:
1481  case Attribute::NoUnwind:
1482  case Attribute::NoInline:
1483  case Attribute::AlwaysInline:
1484  case Attribute::OptimizeForSize:
1485  case Attribute::StackProtect:
1486  case Attribute::StackProtectReq:
1487  case Attribute::StackProtectStrong:
1488  case Attribute::SafeStack:
1489  case Attribute::ShadowCallStack:
1490  case Attribute::NoRedZone:
1491  case Attribute::NoImplicitFloat:
1492  case Attribute::Naked:
1493  case Attribute::InlineHint:
1494  case Attribute::StackAlignment:
1495  case Attribute::UWTable:
1496  case Attribute::NonLazyBind:
1497  case Attribute::ReturnsTwice:
1498  case Attribute::SanitizeAddress:
1499  case Attribute::SanitizeHWAddress:
1500  case Attribute::SanitizeThread:
1501  case Attribute::SanitizeMemory:
1502  case Attribute::MinSize:
1503  case Attribute::NoDuplicate:
1504  case Attribute::Builtin:
1505  case Attribute::NoBuiltin:
1506  case Attribute::Cold:
1507  case Attribute::OptForFuzzing:
1508  case Attribute::OptimizeNone:
1509  case Attribute::JumpTable:
1510  case Attribute::Convergent:
1511  case Attribute::ArgMemOnly:
1512  case Attribute::NoRecurse:
1513  case Attribute::InaccessibleMemOnly:
1514  case Attribute::InaccessibleMemOrArgMemOnly:
1515  case Attribute::AllocSize:
1516  case Attribute::SpeculativeLoadHardening:
1517  case Attribute::Speculatable:
1518  case Attribute::StrictFP:
1519  return true;
1520  default:
1521  break;
1522  }
1523  return false;
1524 }
1525 
1526 /// Return true if this is a function attribute that can also appear on
1527 /// arguments.
1528 static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1529  return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1530  Kind == Attribute::ReadNone;
1531 }
1532 
1533 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1534  const Value *V) {
1535  for (Attribute A : Attrs) {
1536  if (A.isStringAttribute())
1537  continue;
1538 
1539  if (isFuncOnlyAttr(A.getKindAsEnum())) {
1540  if (!IsFunction) {
1541  CheckFailed("Attribute '" + A.getAsString() +
1542  "' only applies to functions!",
1543  V);
1544  return;
1545  }
1546  } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1547  CheckFailed("Attribute '" + A.getAsString() +
1548  "' does not apply to functions!",
1549  V);
1550  return;
1551  }
1552  }
1553 }
1554 
1555 // verifyParameterAttrs - Check the given attributes for an argument or return
1556 // value of the specified type. The value V is printed in error messages.
1557 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1558  const Value *V) {
1559  if (!Attrs.hasAttributes())
1560  return;
1561 
1562  verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1563 
1564  // Check for mutually incompatible attributes. Only inreg is compatible with
1565  // sret.
1566  unsigned AttrCount = 0;
1567  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1568  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1569  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1570  Attrs.hasAttribute(Attribute::InReg);
1571  AttrCount += Attrs.hasAttribute(Attribute::Nest);
1572  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
1573  "and 'sret' are incompatible!",
1574  V);
1575 
1576  Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1577  Attrs.hasAttribute(Attribute::ReadOnly)),
1578  "Attributes "
1579  "'inalloca and readonly' are incompatible!",
1580  V);
1581 
1582  Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1583  Attrs.hasAttribute(Attribute::Returned)),
1584  "Attributes "
1585  "'sret and returned' are incompatible!",
1586  V);
1587 
1588  Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1589  Attrs.hasAttribute(Attribute::SExt)),
1590  "Attributes "
1591  "'zeroext and signext' are incompatible!",
1592  V);
1593 
1594  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1595  Attrs.hasAttribute(Attribute::ReadOnly)),
1596  "Attributes "
1597  "'readnone and readonly' are incompatible!",
1598  V);
1599 
1600  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1601  Attrs.hasAttribute(Attribute::WriteOnly)),
1602  "Attributes "
1603  "'readnone and writeonly' are incompatible!",
1604  V);
1605 
1606  Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1607  Attrs.hasAttribute(Attribute::WriteOnly)),
1608  "Attributes "
1609  "'readonly and writeonly' are incompatible!",
1610  V);
1611 
1612  Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1613  Attrs.hasAttribute(Attribute::AlwaysInline)),
1614  "Attributes "
1615  "'noinline and alwaysinline' are incompatible!",
1616  V);
1617 
1618  AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1619  Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1620  "Wrong types for attribute: " +
1621  AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1622  V);
1623 
1624  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1625  SmallPtrSet<Type*, 4> Visited;
1626  if (!PTy->getElementType()->isSized(&Visited)) {
1627  Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1628  !Attrs.hasAttribute(Attribute::InAlloca),
1629  "Attributes 'byval' and 'inalloca' do not support unsized types!",
1630  V);
1631  }
1632  if (!isa<PointerType>(PTy->getElementType()))
1633  Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1634  "Attribute 'swifterror' only applies to parameters "
1635  "with pointer to pointer type!",
1636  V);
1637  } else {
1638  Assert(!Attrs.hasAttribute(Attribute::ByVal),
1639  "Attribute 'byval' only applies to parameters with pointer type!",
1640  V);
1641  Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1642  "Attribute 'swifterror' only applies to parameters "
1643  "with pointer type!",
1644  V);
1645  }
1646 }
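// A minimal sketch of what verifyParameterAttrs enforces (not from the LLVM
// sources; %struct.S and the function names are hypothetical):
//
//   %struct.S = type { i32, i32 }
//   declare void @ok(%struct.S* byval %arg)            ; accepted: byval on a sized pointee
//   declare void @bad(%struct.S* byval inalloca %arg)  ; rejected: 'byval' and 'inalloca'
//                                                      ; are mutually incompatible
//
// The same routine rejects readnone+readonly, zeroext+signext, sret+returned,
// and 'swifterror' on anything but a pointer-to-pointer parameter.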
1647 
1648 // Check parameter attributes against a function type.
1649 // The value V is printed in error messages.
1650 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1651  const Value *V) {
1652  if (Attrs.isEmpty())
1653  return;
1654 
1655  bool SawNest = false;
1656  bool SawReturned = false;
1657  bool SawSRet = false;
1658  bool SawSwiftSelf = false;
1659  bool SawSwiftError = false;
1660 
1661  // Verify return value attributes.
1662  AttributeSet RetAttrs = Attrs.getRetAttributes();
1663  Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1664  !RetAttrs.hasAttribute(Attribute::Nest) &&
1665  !RetAttrs.hasAttribute(Attribute::StructRet) &&
1666  !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1667  !RetAttrs.hasAttribute(Attribute::Returned) &&
1668  !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1669  !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1670  !RetAttrs.hasAttribute(Attribute::SwiftError)),
1671  "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
1672  "'returned', 'swiftself', and 'swifterror' do not apply to return "
1673  "values!",
1674  V);
1675  Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1676  !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1677  !RetAttrs.hasAttribute(Attribute::ReadNone)),
1678  "Attribute '" + RetAttrs.getAsString() +
1679  "' does not apply to function returns",
1680  V);
1681  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1682 
1683  // Verify parameter attributes.
1684  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1685  Type *Ty = FT->getParamType(i);
1686  AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1687 
1688  verifyParameterAttrs(ArgAttrs, Ty, V);
1689 
1690  if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1691  Assert(!SawNest, "More than one parameter has attribute nest!", V);
1692  SawNest = true;
1693  }
1694 
1695  if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1696  Assert(!SawReturned, "More than one parameter has attribute returned!",
1697  V);
1699  "Incompatible argument and return types for 'returned' attribute",
1700  V);
1701  SawReturned = true;
1702  }
1703 
1704  if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1705  Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1706  Assert(i == 0 || i == 1,
1707  "Attribute 'sret' is not on first or second parameter!", V);
1708  SawSRet = true;
1709  }
1710 
1711  if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1712  Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1713  SawSwiftSelf = true;
1714  }
1715 
1716  if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1717  Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1718  V);
1719  SawSwiftError = true;
1720  }
1721 
1722  if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1723  Assert(i == FT->getNumParams() - 1,
1724  "inalloca isn't on the last parameter!", V);
1725  }
1726  }
1727 
1728  if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1729  return;
1730 
1731  verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1732 
1733  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1734  Attrs.hasFnAttribute(Attribute::ReadOnly)),
1735  "Attributes 'readnone and readonly' are incompatible!", V);
1736 
1737  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1738  Attrs.hasFnAttribute(Attribute::WriteOnly)),
1739  "Attributes 'readnone and writeonly' are incompatible!", V);
1740 
1741  Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1742  Attrs.hasFnAttribute(Attribute::WriteOnly)),
1743  "Attributes 'readonly and writeonly' are incompatible!", V);
1744 
1745  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1746  Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1747  "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1748  "incompatible!",
1749  V);
1750 
1751  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1752  Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1753  "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1754 
1755  Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1756  Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1757  "Attributes 'noinline and alwaysinline' are incompatible!", V);
1758 
1759  if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1760  Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1761  "Attribute 'optnone' requires 'noinline'!", V);
1762 
1763  Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1764  "Attributes 'optsize and optnone' are incompatible!", V);
1765 
1766  Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1767  "Attributes 'minsize and optnone' are incompatible!", V);
1768  }
1769 
1770  if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1771  const GlobalValue *GV = cast<GlobalValue>(V);
1773  "Attribute 'jumptable' requires 'unnamed_addr'", V);
1774  }
1775 
1776  if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1777  std::pair<unsigned, Optional<unsigned>> Args =
1778  Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1779 
1780  auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1781  if (ParamNo >= FT->getNumParams()) {
1782  CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1783  return false;
1784  }
1785 
1786  if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1787  CheckFailed("'allocsize' " + Name +
1788  " argument must refer to an integer parameter",
1789  V);
1790  return false;
1791  }
1792 
1793  return true;
1794  };
1795 
1796  if (!CheckParam("element size", Args.first))
1797  return;
1798 
1799  if (Args.second && !CheckParam("number of elements", *Args.second))
1800  return;
1801  }
1802 }
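// For illustration (not part of the verifier): function-attribute combinations
// checked above, on a hypothetical @f.
//
//   define void @f() noinline optnone {   ; accepted: 'optnone' is paired with 'noinline'
//     ret void
//   }
//
// Writing 'optnone' without 'noinline', or combining 'readnone' with
// 'readonly' or 'writeonly', trips the corresponding Asserts.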
1803 
1804 void Verifier::verifyFunctionMetadata(
1805  ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1806  for (const auto &Pair : MDs) {
1807  if (Pair.first == LLVMContext::MD_prof) {
1808  MDNode *MD = Pair.second;
1809  Assert(MD->getNumOperands() >= 2,
1810  "!prof annotations should have no less than 2 operands", MD);
1811 
1812  // Check first operand.
1813  Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1814  MD);
1815  Assert(isa<MDString>(MD->getOperand(0)),
1816  "expected string with name of the !prof annotation", MD);
1817  MDString *MDS = cast<MDString>(MD->getOperand(0));
1818  StringRef ProfName = MDS->getString();
1819  Assert(ProfName.equals("function_entry_count") ||
1820  ProfName.equals("synthetic_function_entry_count"),
1821  "first operand should be 'function_entry_count'"
1822  " or 'synthetic_function_entry_count'",
1823  MD);
1824 
1825  // Check second operand.
1826  Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1827  MD);
1828  Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1829  "expected integer argument to function_entry_count", MD);
1830  }
1831  }
1832 }
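// A minimal example of the !prof attachment accepted above (illustrative only;
// @hot and the count are hypothetical):
//
//   define void @hot() !prof !0 {
//     ret void
//   }
//   !0 = !{!"function_entry_count", i64 1024}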
1833 
1834 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1835  if (!ConstantExprVisited.insert(EntryC).second)
1836  return;
1837 
1838  SmallVector<const Constant *, 16> Stack;
1839  Stack.push_back(EntryC);
1840 
1841  while (!Stack.empty()) {
1842  const Constant *C = Stack.pop_back_val();
1843 
1844  // Check this constant expression.
1845  if (const auto *CE = dyn_cast<ConstantExpr>(C))
1846  visitConstantExpr(CE);
1847 
1848  if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1849  // Global Values get visited separately, but we do need to make sure
1850  // that the global value is in the correct module
1851  Assert(GV->getParent() == &M, "Referencing global in another module!",
1852  EntryC, &M, GV, GV->getParent());
1853  continue;
1854  }
1855 
1856  // Visit all sub-expressions.
1857  for (const Use &U : C->operands()) {
1858  const auto *OpC = dyn_cast<Constant>(U);
1859  if (!OpC)
1860  continue;
1861  if (!ConstantExprVisited.insert(OpC).second)
1862  continue;
1863  Stack.push_back(OpC);
1864  }
1865  }
1866 }
1867 
1868 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1869  if (CE->getOpcode() == Instruction::BitCast)
1870  Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1871  CE->getType()),
1872  "Invalid bitcast", CE);
1873 
1874  if (CE->getOpcode() == Instruction::IntToPtr ||
1875  CE->getOpcode() == Instruction::PtrToInt) {
1876  auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1877  ? CE->getType()
1878  : CE->getOperand(0)->getType();
1879  StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1880  ? "inttoptr not supported for non-integral pointers"
1881  : "ptrtoint not supported for non-integral pointers";
1882  Assert(
1883  !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1884  Msg);
1885  }
1886 }
1887 
1888 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1889  // There shouldn't be more attribute sets than there are parameters plus the
1890  // function and return value.
1891  return Attrs.getNumAttrSets() <= Params + 2;
1892 }
1893 
1894 /// Verify that statepoint intrinsic is well formed.
1895 void Verifier::verifyStatepoint(const CallBase &Call) {
1896  assert(Call.getCalledFunction() &&
1897  Call.getCalledFunction()->getIntrinsicID() ==
1898  Intrinsic::experimental_gc_statepoint);
1899 
1900  Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
1901  !Call.onlyAccessesArgMemory(),
1902  "gc.statepoint must read and write all memory to preserve "
1903  "reordering restrictions required by safepoint semantics",
1904  Call);
1905 
1906  const Value *IDV = Call.getArgOperand(0);
1907  Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
1908  Call);
1909 
1910  const Value *NumPatchBytesV = Call.getArgOperand(1);
1911  Assert(isa<ConstantInt>(NumPatchBytesV),
1912  "gc.statepoint number of patchable bytes must be a constant integer",
1913  Call);
1914  const int64_t NumPatchBytes =
1915  cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
1916  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1917  Assert(NumPatchBytes >= 0,
1918  "gc.statepoint number of patchable bytes must be "
1919  "positive",
1920  Call);
1921 
1922  const Value *Target = Call.getArgOperand(2);
1923  auto *PT = dyn_cast<PointerType>(Target->getType());
1924  Assert(PT && PT->getElementType()->isFunctionTy(),
1925  "gc.statepoint callee must be of function pointer type", Call, Target);
1926  FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1927 
1928  const Value *NumCallArgsV = Call.getArgOperand(3);
1929  Assert(isa<ConstantInt>(NumCallArgsV),
1930  "gc.statepoint number of arguments to underlying call "
1931  "must be constant integer",
1932  Call);
1933  const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
1934  Assert(NumCallArgs >= 0,
1935  "gc.statepoint number of arguments to underlying call "
1936  "must be positive",
1937  Call);
1938  const int NumParams = (int)TargetFuncType->getNumParams();
1939  if (TargetFuncType->isVarArg()) {
1940  Assert(NumCallArgs >= NumParams,
1941  "gc.statepoint mismatch in number of vararg call args", Call);
1942 
1943  // TODO: Remove this limitation
1944  Assert(TargetFuncType->getReturnType()->isVoidTy(),
1945  "gc.statepoint doesn't support wrapping non-void "
1946  "vararg functions yet",
1947  Call);
1948  } else
1949  Assert(NumCallArgs == NumParams,
1950  "gc.statepoint mismatch in number of call args", Call);
1951 
1952  const Value *FlagsV = Call.getArgOperand(4);
1953  Assert(isa<ConstantInt>(FlagsV),
1954  "gc.statepoint flags must be constant integer", Call);
1955  const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
1956  Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
1957  "unknown flag used in gc.statepoint flags argument", Call);
1958 
1959  // Verify that the types of the call parameter arguments match
1960  // the type of the wrapped callee.
1961  AttributeList Attrs = Call.getAttributes();
1962  for (int i = 0; i < NumParams; i++) {
1963  Type *ParamType = TargetFuncType->getParamType(i);
1964  Type *ArgType = Call.getArgOperand(5 + i)->getType();
1965  Assert(ArgType == ParamType,
1966  "gc.statepoint call argument does not match wrapped "
1967  "function type",
1968  Call);
1969 
1970  if (TargetFuncType->isVarArg()) {
1971  AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
1972  Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
1973  "Attribute 'sret' cannot be used for vararg call arguments!",
1974  Call);
1975  }
1976  }
1977 
1978  const int EndCallArgsInx = 4 + NumCallArgs;
1979 
1980  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
1981  Assert(isa<ConstantInt>(NumTransitionArgsV),
1982  "gc.statepoint number of transition arguments "
1983  "must be constant integer",
1984  Call);
1985  const int NumTransitionArgs =
1986  cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
1987  Assert(NumTransitionArgs >= 0,
1988  "gc.statepoint number of transition arguments must be positive", Call);
1989  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
1990 
1991  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
1992  Assert(isa<ConstantInt>(NumDeoptArgsV),
1993  "gc.statepoint number of deoptimization arguments "
1994  "must be constant integer",
1995  Call);
1996  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
1997  Assert(NumDeoptArgs >= 0,
1998  "gc.statepoint number of deoptimization arguments "
1999  "must be positive",
2000  Call);
2001 
2002  const int ExpectedNumArgs =
2003  7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2004  Assert(ExpectedNumArgs <= (int)Call.arg_size(),
2005  "gc.statepoint too few arguments according to length fields", Call);
2006 
2007  // Check that the only uses of this gc.statepoint are gc.result or
2008  // gc.relocate calls which are tied to this statepoint and thus part
2009  // of the same statepoint sequence
2010  for (const User *U : Call.users()) {
2011  const CallInst *UserCall = dyn_cast<const CallInst>(U);
2012  Assert(UserCall, "illegal use of statepoint token", Call, U);
2013  if (!UserCall)
2014  continue;
2015  Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2016  "gc.result or gc.relocate are the only value uses "
2017  "of a gc.statepoint",
2018  Call, U);
2019  if (isa<GCResultInst>(UserCall)) {
2020  Assert(UserCall->getArgOperand(0) == &Call,
2021  "gc.result connected to wrong gc.statepoint", Call, UserCall);
2022  } else if (isa<GCRelocateInst>(Call)) {
2023  Assert(UserCall->getArgOperand(0) == &Call,
2024  "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2025  }
2026  }
2027 
2028  // Note: It is legal for a single derived pointer to be listed multiple
2029  // times. It's non-optimal, but it is legal. It can also happen after
2030  // insertion if we strip a bitcast away.
2031  // Note: It is really tempting to check that each base is relocated and
2032  // that a derived pointer is never reused as a base pointer. This turns
2033  // out to be problematic since optimizations run after safepoint insertion
2034  // can recognize equality properties that the insertion logic doesn't know
2035  // about. See example statepoint.ll in the verifier subdirectory
2036 }
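// An illustrative gc.statepoint that satisfies the layout checked above
// (adapted sketch, not from this file; @foo and %obj are hypothetical). The
// operands are: ID, number of patch bytes, callee, number of call args, flags,
// the call args, then the transition and deoptimization argument counts.
//
//   declare i32 @foo(i8*)
//   declare token @llvm.experimental.gc.statepoint.p0f_i32p0i8f(i64, i32, i32 (i8*)*, i32, i32, ...)
//   declare i32 @llvm.experimental.gc.result.i32(token)
//
//   define i32 @caller(i8* %obj) gc "statepoint-example" {
//     %tok = call token (i64, i32, i32 (i8*)*, i32, i32, ...)
//            @llvm.experimental.gc.statepoint.p0f_i32p0i8f(
//                i64 0, i32 0, i32 (i8*)* @foo, i32 1, i32 0, i8* %obj, i32 0, i32 0)
//     %res = call i32 @llvm.experimental.gc.result.i32(token %tok)   ; only legal user
//     ret i32 %res
//   }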
2037 
2038 void Verifier::verifyFrameRecoverIndices() {
2039  for (auto &Counts : FrameEscapeInfo) {
2040  Function *F = Counts.first;
2041  unsigned EscapedObjectCount = Counts.second.first;
2042  unsigned MaxRecoveredIndex = Counts.second.second;
2043  Assert(MaxRecoveredIndex <= EscapedObjectCount,
2044  "all indices passed to llvm.localrecover must be less than the "
2045  "number of arguments passed ot llvm.localescape in the parent "
2046  "function",
2047  F);
2048  }
2049 }
2050 
2051 static Instruction *getSuccPad(Instruction *Terminator) {
2052  BasicBlock *UnwindDest;
2053  if (auto *II = dyn_cast<InvokeInst>(Terminator))
2054  UnwindDest = II->getUnwindDest();
2055  else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2056  UnwindDest = CSI->getUnwindDest();
2057  else
2058  UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2059  return UnwindDest->getFirstNonPHI();
2060 }
2061 
2062 void Verifier::verifySiblingFuncletUnwinds() {
2063  SmallPtrSet<Instruction *, 8> Visited;
2064  SmallPtrSet<Instruction *, 8> Active;
2065  for (const auto &Pair : SiblingFuncletInfo) {
2066  Instruction *PredPad = Pair.first;
2067  if (Visited.count(PredPad))
2068  continue;
2069  Active.insert(PredPad);
2070  Instruction *Terminator = Pair.second;
2071  do {
2072  Instruction *SuccPad = getSuccPad(Terminator);
2073  if (Active.count(SuccPad)) {
2074  // Found a cycle; report error
2075  Instruction *CyclePad = SuccPad;
2076  SmallVector<Instruction *, 8> CycleNodes;
2077  do {
2078  CycleNodes.push_back(CyclePad);
2079  Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2080  if (CycleTerminator != CyclePad)
2081  CycleNodes.push_back(CycleTerminator);
2082  CyclePad = getSuccPad(CycleTerminator);
2083  } while (CyclePad != SuccPad);
2084  Assert(false, "EH pads can't handle each other's exceptions",
2085  ArrayRef<Instruction *>(CycleNodes));
2086  }
2087  // Don't re-walk a node we've already checked
2088  if (!Visited.insert(SuccPad).second)
2089  break;
2090  // Walk to this successor if it has a map entry.
2091  PredPad = SuccPad;
2092  auto TermI = SiblingFuncletInfo.find(PredPad);
2093  if (TermI == SiblingFuncletInfo.end())
2094  break;
2095  Terminator = TermI->second;
2096  Active.insert(PredPad);
2097  } while (true);
2098  // Each node only has one successor, so we've walked all the active
2099  // nodes' successors.
2100  Active.clear();
2101  }
2102 }
2103 
2104 // visitFunction - Verify that a function is ok.
2105 //
2106 void Verifier::visitFunction(const Function &F) {
2107  visitGlobalValue(F);
2108 
2109  // Check function arguments.
2110  FunctionType *FT = F.getFunctionType();
2111  unsigned NumArgs = F.arg_size();
2112 
2113  Assert(&Context == &F.getContext(),
2114  "Function context does not match Module context!", &F);
2115 
2116  Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2117  Assert(FT->getNumParams() == NumArgs,
2118  "# formal arguments must match # of arguments for function type!", &F,
2119  FT);
2120  Assert(F.getReturnType()->isFirstClassType() ||
2121  F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2122  "Functions cannot return aggregate values!", &F);
2123 
2124  Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2125  "Invalid struct return type!", &F);
2126 
2127  AttributeList Attrs = F.getAttributes();
2128 
2129  Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2130  "Attribute after last parameter!", &F);
2131 
2132  // Check function attributes.
2133  verifyFunctionAttrs(FT, Attrs, &F);
2134 
2135  // On function declarations/definitions, we do not support the builtin
2136  // attribute. We do not check this in VerifyFunctionAttrs since that is
2137  // checking for Attributes that can/can not ever be on functions.
2138  Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2139  "Attribute 'builtin' can only be applied to a callsite.", &F);
2140 
2141  // Check that this function meets the restrictions on this calling convention.
2142  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2143  // restrictions can be lifted.
2144  switch (F.getCallingConv()) {
2145  default:
2146  case CallingConv::C:
2147  break;
2148  case CallingConv::AMDGPU_KERNEL:
2149  case CallingConv::SPIR_KERNEL:
2150  Assert(F.getReturnType()->isVoidTy(),
2151  "Calling convention requires void return type", &F);
2152  LLVM_FALLTHROUGH;
2153  case CallingConv::AMDGPU_VS:
2154  case CallingConv::AMDGPU_HS:
2155  case CallingConv::AMDGPU_GS:
2156  case CallingConv::AMDGPU_PS:
2157  case CallingConv::AMDGPU_CS:
2158  Assert(!F.hasStructRetAttr(),
2159  "Calling convention does not allow sret", &F);
2160  LLVM_FALLTHROUGH;
2161  case CallingConv::Fast:
2162  case CallingConv::Cold:
2163  case CallingConv::Intel_OCL_BI:
2164  case CallingConv::PTX_Kernel:
2165  case CallingConv::PTX_Device:
2166  Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2167  "perfect forwarding!",
2168  &F);
2169  break;
2170  }
2171 
2172  bool isLLVMdotName = F.getName().size() >= 5 &&
2173  F.getName().substr(0, 5) == "llvm.";
2174 
2175  // Check that the argument values match the function type for this function...
2176  unsigned i = 0;
2177  for (const Argument &Arg : F.args()) {
2178  Assert(Arg.getType() == FT->getParamType(i),
2179  "Argument value does not match function argument type!", &Arg,
2180  FT->getParamType(i));
2182  "Function arguments must have first-class types!", &Arg);
2183  if (!isLLVMdotName) {
2185  "Function takes metadata but isn't an intrinsic", &Arg, &F);
2186  Assert(!Arg.getType()->isTokenTy(),
2187  "Function takes token but isn't an intrinsic", &Arg, &F);
2188  }
2189 
2190  // Check that swifterror argument is only used by loads and stores.
2191  if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2192  verifySwiftErrorValue(&Arg);
2193  }
2194  ++i;
2195  }
2196 
2197  if (!isLLVMdotName)
2198  Assert(!F.getReturnType()->isTokenTy(),
2199  "Functions returns a token but isn't an intrinsic", &F);
2200 
2201  // Get the function metadata attachments.
2202  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2203  F.getAllMetadata(MDs);
2204  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2205  verifyFunctionMetadata(MDs);
2206 
2207  // Check validity of the personality function
2208  if (F.hasPersonalityFn()) {
2209  auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2210  if (Per)
2211  Assert(Per->getParent() == F.getParent(),
2212  "Referencing personality function in another module!",
2213  &F, F.getParent(), Per, Per->getParent());
2214  }
2215 
2216  if (F.isMaterializable()) {
2217  // Function has a body somewhere we can't see.
2218  Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2219  MDs.empty() ? nullptr : MDs.front().second);
2220  } else if (F.isDeclaration()) {
2221  for (const auto &I : MDs) {
2222  AssertDI(I.first != LLVMContext::MD_dbg,
2223  "function declaration may not have a !dbg attachment", &F);
2224  Assert(I.first != LLVMContext::MD_prof,
2225  "function declaration may not have a !prof attachment", &F);
2226 
2227  // Verify the metadata itself.
2228  visitMDNode(*I.second);
2229  }
2230  Assert(!F.hasPersonalityFn(),
2231  "Function declaration shouldn't have a personality routine", &F);
2232  } else {
2233  // Verify that this function (which has a body) is not named "llvm.*". It
2234  // is not legal to define intrinsics.
2235  Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2236 
2237  // Check the entry node
2238  const BasicBlock *Entry = &F.getEntryBlock();
2239  Assert(pred_empty(Entry),
2240  "Entry block to function must not have predecessors!", Entry);
2241 
2242  // The address of the entry block cannot be taken, unless it is dead.
2243  if (Entry->hasAddressTaken()) {
2244  Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2245  "blockaddress may not be used with the entry block!", Entry);
2246  }
2247 
2248  unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2249  // Visit metadata attachments.
2250  for (const auto &I : MDs) {
2251  // Verify that the attachment is legal.
2252  switch (I.first) {
2253  default:
2254  break;
2255  case LLVMContext::MD_dbg: {
2256  ++NumDebugAttachments;
2257  AssertDI(NumDebugAttachments == 1,
2258  "function must have a single !dbg attachment", &F, I.second);
2259  AssertDI(isa<DISubprogram>(I.second),
2260  "function !dbg attachment must be a subprogram", &F, I.second);
2261  auto *SP = cast<DISubprogram>(I.second);
2262  const Function *&AttachedTo = DISubprogramAttachments[SP];
2263  AssertDI(!AttachedTo || AttachedTo == &F,
2264  "DISubprogram attached to more than one function", SP, &F);
2265  AttachedTo = &F;
2266  break;
2267  }
2268  case LLVMContext::MD_prof:
2269  ++NumProfAttachments;
2270  Assert(NumProfAttachments == 1,
2271  "function must have a single !prof attachment", &F, I.second);
2272  break;
2273  }
2274 
2275  // Verify the metadata itself.
2276  visitMDNode(*I.second);
2277  }
2278  }
2279 
2280  // If this function is actually an intrinsic, verify that it is only used in
2281  // direct call/invokes, never having its "address taken".
2282  // Only do this if the module is materialized, otherwise we don't have all the
2283  // uses.
2284  if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2285  const User *U;
2286  if (F.hasAddressTaken(&U))
2287  Assert(false, "Invalid user of intrinsic instruction!", U);
2288  }
2289 
2290  auto *N = F.getSubprogram();
2291  HasDebugInfo = (N != nullptr);
2292  if (!HasDebugInfo)
2293  return;
2294 
2295  // Check that all !dbg attachments lead back to N (or, at least, another
2296  // subprogram that describes the same function).
2297  //
2298  // FIXME: Check this incrementally while visiting !dbg attachments.
2299  // FIXME: Only check when N is the canonical subprogram for F.
2300  SmallPtrSet<const MDNode *, 32> Seen;
2301  for (auto &BB : F)
2302  for (auto &I : BB) {
2303  // Be careful about using DILocation here since we might be dealing with
2304  // broken code (this is the Verifier after all).
2305  DILocation *DL =
2306  dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
2307  if (!DL)
2308  continue;
2309  if (!Seen.insert(DL).second)
2310  continue;
2311 
2312  Metadata *Parent = DL->getRawScope();
2313  AssertDI(Parent && isa<DILocalScope>(Parent),
2314  "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2315  Parent);
2316  DILocalScope *Scope = DL->getInlinedAtScope();
2317  if (Scope && !Seen.insert(Scope).second)
2318  continue;
2319 
2320  DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2321 
2322  // Scope and SP could be the same MDNode and we don't want to skip
2323  // validation in that case
2324  if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2325  continue;
2326 
2327  // FIXME: Once N is canonical, check "SP == &N".
2328  AssertDI(SP->describes(&F),
2329  "!dbg attachment points at wrong subprogram for function", N, &F,
2330  &I, DL, Scope, SP);
2331  }
2332 }
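// Two of the function-level rules above, shown on hypothetical IR (illustration
// only, not part of this file):
//
//   define void @llvm.not.an.intrinsic() {   ; rejected: a body for an "llvm." name
//     ret void                               ; ("llvm intrinsics cannot be defined!")
//   }
//
//   define void @g() {
//   entry:
//     br label %entry                        ; rejected: the entry block now has a
//   }                                        ; predecessor (itself)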
2333 
2334 // visitBasicBlock - Verify that a basic block is well formed...
2335 //
2336 void Verifier::visitBasicBlock(BasicBlock &BB) {
2337  InstsInThisBlock.clear();
2338 
2339  // Ensure that basic blocks have terminators!
2340  Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2341 
2342  // Check constraints that this basic block imposes on all of the PHI nodes in
2343  // it.
2344  if (isa<PHINode>(BB.front())) {
2345  SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2346  SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2347  llvm::sort(Preds);
2348  for (const PHINode &PN : BB.phis()) {
2349  // Ensure that PHI nodes have at least one entry!
2350  Assert(PN.getNumIncomingValues() != 0,
2351  "PHI nodes must have at least one entry. If the block is dead, "
2352  "the PHI should be removed!",
2353  &PN);
2354  Assert(PN.getNumIncomingValues() == Preds.size(),
2355  "PHINode should have one entry for each predecessor of its "
2356  "parent basic block!",
2357  &PN);
2358 
2359  // Get and sort all incoming values in the PHI node...
2360  Values.clear();
2361  Values.reserve(PN.getNumIncomingValues());
2362  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2363  Values.push_back(
2364  std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2365  llvm::sort(Values);
2366 
2367  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2368  // Check to make sure that if there is more than one entry for a
2369  // particular basic block in this PHI node, that the incoming values are
2370  // all identical.
2371  //
2372  Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2373  Values[i].second == Values[i - 1].second,
2374  "PHI node has multiple entries for the same basic block with "
2375  "different incoming values!",
2376  &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2377 
2378  // Check to make sure that the predecessors and PHI node entries are
2379  // matched up.
2380  Assert(Values[i].first == Preds[i],
2381  "PHI node entries do not match predecessors!", &PN,
2382  Values[i].first, Preds[i]);
2383  }
2384  }
2385  }
2386 
2387  // Check that all instructions have their parent pointers set up correctly.
2388  for (auto &I : BB)
2389  {
2390  Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2391  }
2392 }
2393 
2394 void Verifier::visitTerminator(Instruction &I) {
2395  // Ensure that terminators only exist at the end of the basic block.
2396  Assert(&I == I.getParent()->getTerminator(),
2397  "Terminator found in the middle of a basic block!", I.getParent());
2398  visitInstruction(I);
2399 }
2400 
2401 void Verifier::visitBranchInst(BranchInst &BI) {
2402  if (BI.isConditional()) {
2403  Assert(BI.getCondition()->getType()->isIntegerTy(1),
2404  "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2405  }
2406  visitTerminator(BI);
2407 }
2408 
2409 void Verifier::visitReturnInst(ReturnInst &RI) {
2410  Function *F = RI.getParent()->getParent();
2411  unsigned N = RI.getNumOperands();
2412  if (F->getReturnType()->isVoidTy())
2413  Assert(N == 0,
2414  "Found return instr that returns non-void in Function of void "
2415  "return type!",
2416  &RI, F->getReturnType());
2417  else
2418  Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2419  "Function return type does not match operand "
2420  "type of return inst!",
2421  &RI, F->getReturnType());
2422 
2423  // Check to make sure that the return value has necessary properties for
2424  // terminators...
2425  visitTerminator(RI);
2426 }
2427 
2428 void Verifier::visitSwitchInst(SwitchInst &SI) {
2429  // Check to make sure that all of the constants in the switch instruction
2430  // have the same type as the switched-on value.
2431  Type *SwitchTy = SI.getCondition()->getType();
2432  SmallPtrSet<ConstantInt*, 32> Constants;
2433  for (auto &Case : SI.cases()) {
2434  Assert(Case.getCaseValue()->getType() == SwitchTy,
2435  "Switch constants must all be same type as switch value!", &SI);
2436  Assert(Constants.insert(Case.getCaseValue()).second,
2437  "Duplicate integer as switch case", &SI, Case.getCaseValue());
2438  }
2439 
2440  visitTerminator(SI);
2441 }
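// A small negative example for the checks above (illustrative; @f is hypothetical):
//
//   define void @f(i32 %x) {
//   entry:
//     switch i32 %x, label %done [ i32 0, label %done
//                                  i32 0, label %done ]   ; rejected: case value 0 repeats
//   done:
//     ret void
//   }
//
// Every case constant must also have the switched-on type (i32 here).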
2442 
2443 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2444  Assert(BI.getAddress()->getType()->isPointerTy(),
2445  "Indirectbr operand must have pointer type!", &BI);
2446  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2447  Assert(BI.getDestination(i)->getType()->isLabelTy(),
2448  "Indirectbr destinations must all have pointer type!", &BI);
2449 
2450  visitTerminator(BI);
2451 }
2452 
2453 void Verifier::visitSelectInst(SelectInst &SI) {
2454  Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2455  SI.getOperand(2)),
2456  "Invalid operands for select instruction!", &SI);
2457 
2458  Assert(SI.getTrueValue()->getType() == SI.getType(),
2459  "Select values must have same type as select instruction!", &SI);
2460  visitInstruction(SI);
2461 }
2462 
2463 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
2464 /// a pass; if any exist, it's an error.
2465 ///
2466 void Verifier::visitUserOp1(Instruction &I) {
2467  Assert(false, "User-defined operators should not live outside of a pass!", &I);
2468 }
2469 
2470 void Verifier::visitTruncInst(TruncInst &I) {
2471  // Get the source and destination types
2472  Type *SrcTy = I.getOperand(0)->getType();
2473  Type *DestTy = I.getType();
2474 
2475  // Get the size of the types in bits, we'll need this later
2476  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2477  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2478 
2479  Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2480  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2481  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2482  "trunc source and destination must both be a vector or neither", &I);
2483  Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2484 
2485  visitInstruction(I);
2486 }
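// The bit-width rules checked here and in the zext/sext visitors below, on
// hypothetical operands (illustration only):
//
//   %a = trunc i32 %x to i8    ; ok: source is strictly wider than the destination
//   %b = zext  i8  %y to i32   ; ok: destination is strictly wider than the source
//   %c = trunc i16 %z to i16   ; rejected: "DestTy too big for Trunc"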
2487 
2488 void Verifier::visitZExtInst(ZExtInst &I) {
2489  // Get the source and destination types
2490  Type *SrcTy = I.getOperand(0)->getType();
2491  Type *DestTy = I.getType();
2492 
2493  // Get the size of the types in bits, we'll need this later
2494  Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2495  Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2496  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2497  "zext source and destination must both be a vector or neither", &I);
2498  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2499  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2500 
2501  Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2502 
2503  visitInstruction(I);
2504 }
2505 
2506 void Verifier::visitSExtInst(SExtInst &I) {
2507  // Get the source and destination types
2508  Type *SrcTy = I.getOperand(0)->getType();
2509  Type *DestTy = I.getType();
2510 
2511  // Get the size of the types in bits, we'll need this later
2512  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2513  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2514 
2515  Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2516  Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2517  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2518  "sext source and destination must both be a vector or neither", &I);
2519  Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2520 
2521  visitInstruction(I);
2522 }
2523 
2524 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2525  // Get the source and destination types
2526  Type *SrcTy = I.getOperand(0)->getType();
2527  Type *DestTy = I.getType();
2528  // Get the size of the types in bits, we'll need this later
2529  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2530  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2531 
2532  Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2533  Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2534  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2535  "fptrunc source and destination must both be a vector or neither", &I);
2536  Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2537 
2538  visitInstruction(I);
2539 }
2540 
2541 void Verifier::visitFPExtInst(FPExtInst &I) {
2542  // Get the source and destination types
2543  Type *SrcTy = I.getOperand(0)->getType();
2544  Type *DestTy = I.getType();
2545 
2546  // Get the size of the types in bits, we'll need this later
2547  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2548  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2549 
2550  Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2551  Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2552  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2553  "fpext source and destination must both be a vector or neither", &I);
2554  Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2555 
2556  visitInstruction(I);
2557 }
2558 
2559 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2560  // Get the source and destination types
2561  Type *SrcTy = I.getOperand(0)->getType();
2562  Type *DestTy = I.getType();
2563 
2564  bool SrcVec = SrcTy->isVectorTy();
2565  bool DstVec = DestTy->isVectorTy();
2566 
2567  Assert(SrcVec == DstVec,
2568  "UIToFP source and dest must both be vector or scalar", &I);
2569  Assert(SrcTy->isIntOrIntVectorTy(),
2570  "UIToFP source must be integer or integer vector", &I);
2571  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2572  &I);
2573 
2574  if (SrcVec && DstVec)
2575  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2576  cast<VectorType>(DestTy)->getNumElements(),
2577  "UIToFP source and dest vector length mismatch", &I);
2578 
2579  visitInstruction(I);
2580 }
2581 
2582 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2583  // Get the source and destination types
2584  Type *SrcTy = I.getOperand(0)->getType();
2585  Type *DestTy = I.getType();
2586 
2587  bool SrcVec = SrcTy->isVectorTy();
2588  bool DstVec = DestTy->isVectorTy();
2589 
2590  Assert(SrcVec == DstVec,
2591  "SIToFP source and dest must both be vector or scalar", &I);
2592  Assert(SrcTy->isIntOrIntVectorTy(),
2593  "SIToFP source must be integer or integer vector", &I);
2594  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2595  &I);
2596 
2597  if (SrcVec && DstVec)
2598  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2599  cast<VectorType>(DestTy)->getNumElements(),
2600  "SIToFP source and dest vector length mismatch", &I);
2601 
2602  visitInstruction(I);
2603 }
2604 
2605 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2606  // Get the source and destination types
2607  Type *SrcTy = I.getOperand(0)->getType();
2608  Type *DestTy = I.getType();
2609 
2610  bool SrcVec = SrcTy->isVectorTy();
2611  bool DstVec = DestTy->isVectorTy();
2612 
2613  Assert(SrcVec == DstVec,
2614  "FPToUI source and dest must both be vector or scalar", &I);
2615  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2616  &I);
2617  Assert(DestTy->isIntOrIntVectorTy(),
2618  "FPToUI result must be integer or integer vector", &I);
2619 
2620  if (SrcVec && DstVec)
2621  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2622  cast<VectorType>(DestTy)->getNumElements(),
2623  "FPToUI source and dest vector length mismatch", &I);
2624 
2625  visitInstruction(I);
2626 }
2627 
2628 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2629  // Get the source and destination types
2630  Type *SrcTy = I.getOperand(0)->getType();
2631  Type *DestTy = I.getType();
2632 
2633  bool SrcVec = SrcTy->isVectorTy();
2634  bool DstVec = DestTy->isVectorTy();
2635 
2636  Assert(SrcVec == DstVec,
2637  "FPToSI source and dest must both be vector or scalar", &I);
2638  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2639  &I);
2640  Assert(DestTy->isIntOrIntVectorTy(),
2641  "FPToSI result must be integer or integer vector", &I);
2642 
2643  if (SrcVec && DstVec)
2644  Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2645  cast<VectorType>(DestTy)->getNumElements(),
2646  "FPToSI source and dest vector length mismatch", &I);
2647 
2648  visitInstruction(I);
2649 }
2650 
2651 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2652  // Get the source and destination types
2653  Type *SrcTy = I.getOperand(0)->getType();
2654  Type *DestTy = I.getType();
2655 
2656  Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2657 
2658  if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2660  "ptrtoint not supported for non-integral pointers");
2661 
2662  Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2663  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2664  &I);
2665 
2666  if (SrcTy->isVectorTy()) {
2667  VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2668  VectorType *VDest = dyn_cast<VectorType>(DestTy);
2669  Assert(VSrc->getNumElements() == VDest->getNumElements(),
2670  "PtrToInt Vector width mismatch", &I);
2671  }
2672 
2673  visitInstruction(I);
2674 }
2675 
2676 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2677  // Get the source and destination types
2678  Type *SrcTy = I.getOperand(0)->getType();
2679  Type *DestTy = I.getType();
2680 
2681  Assert(SrcTy->isIntOrIntVectorTy(),
2682  "IntToPtr source must be an integral", &I);
2683  Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2684 
2685  if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2687  "inttoptr not supported for non-integral pointers");
2688 
2689  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2690  &I);
2691  if (SrcTy->isVectorTy()) {
2692  VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2693  VectorType *VDest = dyn_cast<VectorType>(DestTy);
2694  Assert(VSrc->getNumElements() == VDest->getNumElements(),
2695  "IntToPtr Vector width mismatch", &I);
2696  }
2697  visitInstruction(I);
2698 }
2699 
2700 void Verifier::visitBitCastInst(BitCastInst &I) {
2701  Assert(
2702  CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2703  "Invalid bitcast", &I);
2704  visitInstruction(I);
2705 }
2706 
2707 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2708  Type *SrcTy = I.getOperand(0)->getType();
2709  Type *DestTy = I.getType();
2710 
2711  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2712  &I);
2713  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2714  &I);
2716  "AddrSpaceCast must be between different address spaces", &I);
2717  if (SrcTy->isVectorTy())
2718  Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2719  "AddrSpaceCast vector pointer number of elements mismatch", &I);
2720  visitInstruction(I);
2721 }
2722 
2723 /// visitPHINode - Ensure that a PHI node is well formed.
2724 ///
2725 void Verifier::visitPHINode(PHINode &PN) {
2726  // Ensure that the PHI nodes are all grouped together at the top of the block.
2727  // This can be tested by checking whether the instruction before this is
2728  // either nonexistent (because this is begin()) or is a PHI node. If not,
2729  // then there is some other instruction before a PHI.
2730  Assert(&PN == &PN.getParent()->front() ||
2731  isa<PHINode>(--BasicBlock::iterator(&PN)),
2732  "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2733 
2734  // Check that a PHI doesn't yield a Token.
2735  Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2736 
2737  // Check that all of the values of the PHI node have the same type as the
2738  // result, and that the incoming blocks are really basic blocks.
2739  for (Value *IncValue : PN.incoming_values()) {
2740  Assert(PN.getType() == IncValue->getType(),
2741  "PHI node operands are not the same type as the result!", &PN);
2742  }
2743 
2744  // All other PHI node constraints are checked in the visitBasicBlock method.
2745 
2746  visitInstruction(PN);
2747 }
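// A well-formed PHI per the checks above and in visitBasicBlock (illustrative
// sketch; @sel is hypothetical):
//
//   define i32 @sel(i1 %c, i32 %x) {
//   entry:
//     br i1 %c, label %then, label %else
//   then:
//     br label %merge
//   else:
//     br label %merge
//   merge:                                      ; preds = %then, %else
//     %v = phi i32 [ 1, %then ], [ %x, %else ]  ; grouped at the top, one entry per
//     ret i32 %v                                ; predecessor, same type as the result
//   }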
2748 
2749 void Verifier::visitCallBase(CallBase &Call) {
2751  "Called function must be a pointer!", Call);
2752  PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
2753 
2754  Assert(FPTy->getElementType()->isFunctionTy(),
2755  "Called function is not pointer to function type!", Call);
2756 
2757  Assert(FPTy->getElementType() == Call.getFunctionType(),
2758  "Called function is not the same type as the call!", Call);
2759 
2760  FunctionType *FTy = Call.getFunctionType();
2761 
2762  // Verify that the correct number of arguments are being passed
2763  if (FTy->isVarArg())
2764  Assert(Call.arg_size() >= FTy->getNumParams(),
2765  "Called function requires more parameters than were provided!",
2766  Call);
2767  else
2768  Assert(Call.arg_size() == FTy->getNumParams(),
2769  "Incorrect number of arguments passed to called function!", Call);
2770 
2771  // Verify that all arguments to the call match the function type.
2772  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2773  Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
2774  "Call parameter type does not match function signature!",
2775  Call.getArgOperand(i), FTy->getParamType(i), Call);
2776 
2777  AttributeList Attrs = Call.getAttributes();
2778 
2779  Assert(verifyAttributeCount(Attrs, Call.arg_size()),
2780  "Attribute after last parameter!", Call);
2781 
2782  if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2783  // Don't allow speculatable on call sites, unless the underlying function
2784  // declaration is also speculatable.
2785  Function *Callee =
2786  dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
2787  Assert(Callee && Callee->isSpeculatable(),
2788  "speculatable attribute may not apply to call sites", Call);
2789  }
2790 
2791  // Verify call attributes.
2792  verifyFunctionAttrs(FTy, Attrs, &Call);
2793 
2794  // Conservatively check the inalloca argument.
2795  // We have a bug if we can find that there is an underlying alloca without
2796  // inalloca.
2797  if (Call.hasInAllocaArgument()) {
2798  Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
2799  if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2800  Assert(AI->isUsedWithInAlloca(),
2801  "inalloca argument for call has mismatched alloca", AI, Call);
2802  }
2803 
2804  // For each argument of the callsite, if it has the swifterror argument,
2805  // make sure the underlying alloca/parameter it comes from has a swifterror as
2806  // well.
2807  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2808  if (Call.paramHasAttr(i, Attribute::SwiftError)) {
2809  Value *SwiftErrorArg = Call.getArgOperand(i);
2810  if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2811  Assert(AI->isSwiftError(),
2812  "swifterror argument for call has mismatched alloca", AI, Call);
2813  continue;
2814  }
2815  auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2816  Assert(ArgI,
2817  "swifterror argument should come from an alloca or parameter",
2818  SwiftErrorArg, Call);
2819  Assert(ArgI->hasSwiftErrorAttr(),
2820  "swifterror argument for call has mismatched parameter", ArgI,
2821  Call);
2822  }
2823 
2824  if (FTy->isVarArg()) {
2825  // FIXME? is 'nest' even legal here?
2826  bool SawNest = false;
2827  bool SawReturned = false;
2828 
2829  for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2830  if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2831  SawNest = true;
2832  if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2833  SawReturned = true;
2834  }
2835 
2836  // Check attributes on the varargs part.
2837  for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
2838  Type *Ty = Call.getArgOperand(Idx)->getType();
2839  AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2840  verifyParameterAttrs(ArgAttrs, Ty, &Call);
2841 
2842  if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2843  Assert(!SawNest, "More than one parameter has attribute nest!", Call);
2844  SawNest = true;
2845  }
2846 
2847  if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2848  Assert(!SawReturned, "More than one parameter has attribute returned!",
2849  Call);
2850  Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2851  "Incompatible argument and return types for 'returned' "
2852  "attribute",
2853  Call);
2854  SawReturned = true;
2855  }
2856 
2857  // Statepoint intrinsic is vararg but the wrapped function may be not.
2858  // Allow sret here and check the wrapped function in verifyStatepoint.
2859  if (!Call.getCalledFunction() ||
2860  Call.getCalledFunction()->getIntrinsicID() !=
2861  Intrinsic::experimental_gc_statepoint)
2862  Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2863  "Attribute 'sret' cannot be used for vararg call arguments!",
2864  Call);
2865 
2866  if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2867  Assert(Idx == Call.arg_size() - 1,
2868  "inalloca isn't on the last argument!", Call);
2869  }
2870  }
2871 
2872  // Verify that there's no metadata unless it's a direct call to an intrinsic.
2873  if (!Call.getCalledFunction() ||
2874  !Call.getCalledFunction()->getName().startswith("llvm.")) {
2875  for (Type *ParamTy : FTy->params()) {
2876  Assert(!ParamTy->isMetadataTy(),
2877  "Function has metadata parameter but isn't an intrinsic", Call);
2878  Assert(!ParamTy->isTokenTy(),
2879  "Function has token parameter but isn't an intrinsic", Call);
2880  }
2881  }
2882 
2883  // Verify that indirect calls don't return tokens.
2884  if (!Call.getCalledFunction())
2885  Assert(!FTy->getReturnType()->isTokenTy(),
2886  "Return type cannot be token for indirect call!");
2887 
2888  if (Function *F = Call.getCalledFunction())
2889  if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
2890  visitIntrinsicCall(ID, Call);
2891 
2892  // Verify that a callsite has at most one "deopt", at most one "funclet" and
2893  // at most one "gc-transition" operand bundle.
2894  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
2895  FoundGCTransitionBundle = false;
2896  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
2897  OperandBundleUse BU = Call.getOperandBundleAt(i);
2898  uint32_t Tag = BU.getTagID();
2899  if (Tag == LLVMContext::OB_deopt) {
2900  Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
2901  FoundDeoptBundle = true;
2902  } else if (Tag == LLVMContext::OB_gc_transition) {
2903  Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
2904  Call);
2905  FoundGCTransitionBundle = true;
2906  } else if (Tag == LLVMContext::OB_funclet) {
2907  Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
2908  FoundFuncletBundle = true;
2909  Assert(BU.Inputs.size() == 1,
2910  "Expected exactly one funclet bundle operand", Call);
2911  Assert(isa<FuncletPadInst>(BU.Inputs.front()),
2912  "Funclet bundle operands should correspond to a FuncletPadInst",
2913  Call);
2914  }
2915  }
2916 
2917  // Verify that each inlinable callsite of a debug-info-bearing function in a
2918  // debug-info-bearing function has a debug location attached to it. Failure to
2919  // do so causes assertion failures when the inliner sets up inline scope info.
2920  if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
2921  Call.getCalledFunction()->getSubprogram())
2922  AssertDI(Call.getDebugLoc(),
2923  "inlinable function call in a function with "
2924  "debug info must have a !dbg location",
2925  Call);
2926 
2927  visitInstruction(Call);
2928 }
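// An illustrative call site with an operand bundle (not from this file; the
// names are hypothetical). visitCallBase allows at most one "deopt", one
// "funclet", and one "gc-transition" bundle per call.
//
//   define i32 @f(i32 %a, i32 %state) {
//     %r = call i32 @callee(i32 %a) [ "deopt"(i32 %state) ]
//     ret i32 %r
//   }
//   declare i32 @callee(i32)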
2929 
2930 /// Two types are "congruent" if they are identical, or if they are both pointer
2931 /// types with different pointee types and the same address space.
2932 static bool isTypeCongruent(Type *L, Type *R) {
2933  if (L == R)
2934  return true;
2935  PointerType *PL = dyn_cast<PointerType>(L);
2936  PointerType *PR = dyn_cast<PointerType>(R);
2937  if (!PL || !PR)
2938  return false;
2939  return PL->getAddressSpace() == PR->getAddressSpace();
2940 }
2941 
2942 static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
2943  static const Attribute::AttrKind ABIAttrs[] = {
2944  Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
2945  Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
2946  Attribute::SwiftError};
2947  AttrBuilder Copy;
2948  for (auto AK : ABIAttrs) {
2949  if (Attrs.hasParamAttribute(I, AK))
2950  Copy.addAttribute(AK);
2951  }
2952  if (Attrs.hasParamAttribute(I, Attribute::Alignment))
2953  Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
2954  return Copy;
2955 }
2956 
2957 void Verifier::verifyMustTailCall(CallInst &CI) {
2958  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
2959 
2960  // - The caller and callee prototypes must match. Pointer types of
2961  // parameters or return types may differ in pointee type, but not
2962  // address space.
2963  Function *F = CI.getParent()->getParent();
2964  FunctionType *CallerTy = F->getFunctionType();
2965  FunctionType *CalleeTy = CI.getFunctionType();
2966  if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
2967  Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
2968  "cannot guarantee tail call due to mismatched parameter counts",
2969  &CI);
2970  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2971  Assert(
2972  isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
2973  "cannot guarantee tail call due to mismatched parameter types", &CI);
2974  }
2975  }
2976  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
2977  "cannot guarantee tail call due to mismatched varargs", &CI);
2978  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
2979  "cannot guarantee tail call due to mismatched return types", &CI);
2980 
2981  // - The calling conventions of the caller and callee must match.
2982  Assert(F->getCallingConv() == CI.getCallingConv(),
2983  "cannot guarantee tail call due to mismatched calling conv", &CI);
2984 
2985  // - All ABI-impacting function attributes, such as sret, byval, inreg,
2986  // returned, and inalloca, must match.
2987  AttributeList CallerAttrs = F->getAttributes();
2988  AttributeList CalleeAttrs = CI.getAttributes();
2989  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2990  AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
2991  AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
2992  Assert(CallerABIAttrs == CalleeABIAttrs,
2993  "cannot guarantee tail call due to mismatched ABI impacting "
2994  "function attributes",
2995  &CI, CI.getOperand(I));
2996  }
2997 
2998  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
2999  // or a pointer bitcast followed by a ret instruction.
3000  // - The ret instruction must return the (possibly bitcasted) value
3001  // produced by the call or void.
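 // For example (illustrative IR), a well-formed musttail tail sequence is:
 //   %r = musttail call i8* @callee(i8* %p)
 //   %c = bitcast i8* %r to i32*
 //   ret i32* %c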
3002  Value *RetVal = &CI;
3003  Instruction *Next = CI.getNextNode();
3004 
3005  // Handle the optional bitcast.
3006  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3007  Assert(BI->getOperand(0) == RetVal,
3008  "bitcast following musttail call must use the call", BI);
3009  RetVal = BI;
3010  Next = BI->getNextNode();
3011  }
3012 
3013  // Check the return.
3014  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3015  Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3016  &CI);
3017  Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3018  "musttail call result must be returned", Ret);
3019 }
3020 
3021 void Verifier::visitCallInst(CallInst &CI) {
3022  visitCallBase(CI);
3023 
3024  if (CI.isMustTailCall())
3025  verifyMustTailCall(CI);
3026 }
3027 
3028 void Verifier::visitInvokeInst(InvokeInst &II) {
3029  visitCallBase(II);
3030 
3031  // Verify that the first non-PHI instruction of the unwind destination is an
3032  // exception handling instruction.
3033  Assert(
3034  II.getUnwindDest()->isEHPad(),
3035  "The unwind destination does not have an exception handling instruction!",
3036  &II);
3037 
3038  visitTerminator(II);
3039 }
3040 
3041 /// visitUnaryOperator - Check the argument to the unary operator.
3042 ///
3043 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3044  Assert(U.getType() == U.getOperand(0)->getType(),
3045  "Unary operators must have same type for"
3046  "operands and result!",
3047  &U);
3048 
3049  switch (U.getOpcode()) {
3050  // Check that floating-point arithmetic operators are only used with
3051  // floating-point operands.
3052  case Instruction::FNeg:
3054  "FNeg operator only works with float types!", &U);
3055  break;
3056  default:
3057  llvm_unreachable("Unknown UnaryOperator opcode!");
3058  }
3059 
3060  visitInstruction(U);
3061 }
3062 
3063 /// visitBinaryOperator - Check that both arguments to the binary operator are
3064 /// of the same type!
3065 ///
3066 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3067  Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3068  "Both operands to a binary operator are not of the same type!", &B);
3069 
3070  switch (B.getOpcode()) {
3071  // Check that integer arithmetic operators are only used with
3072  // integral operands.
3073  case Instruction::Add:
3074  case Instruction::Sub:
3075  case Instruction::Mul:
3076  case Instruction::SDiv:
3077  case Instruction::UDiv:
3078  case Instruction::SRem:
3079  case Instruction::URem:
3081  "Integer arithmetic operators only work with integral types!", &B);
3082  Assert(B.getType() == B.getOperand(0)->getType(),
3083  "Integer arithmetic operators must have same type "
3084  "for operands and result!",
3085  &B);
3086  break;
3087  // Check that floating-point arithmetic operators are only used with
3088  // floating-point operands.
3089  case Instruction::FAdd:
3090  case Instruction::FSub:
3091  case Instruction::FMul:
3092  case Instruction::FDiv:
3093  case Instruction::FRem:
3095  "Floating-point arithmetic operators only work with "
3096  "floating-point types!",
3097  &B);
3098  Assert(B.getType() == B.getOperand(0)->getType(),
3099  "Floating-point arithmetic operators must have same type "
3100  "for operands and result!",
3101  &B);
3102  break;
3103  // Check that logical operators are only used with integral operands.
3104  case Instruction::And:
3105  case Instruction::Or:
3106  case Instruction::Xor:
3108  "Logical operators only work with integral types!", &B);
3109  Assert(B.getType() == B.getOperand(0)->getType(),
3110  "Logical operators must have same type for operands and result!",
3111  &B);
3112  break;
3113  case Instruction::Shl:
3114  case Instruction::LShr:
3115  case Instruction::AShr:
3117  "Shifts only work with integral types!", &B);
3118  Assert(B.getType() == B.getOperand(0)->getType(),
3119  "Shift return type must be same as operands!", &B);
3120  break;
3121  default:
3122  llvm_unreachable("Unknown BinaryOperator opcode!");
3123  }
3124 
3125  visitInstruction(B);
3126 }
3127 
3128 void Verifier::visitICmpInst(ICmpInst &IC) {
3129  // Check that the operands are the same type
3130  Type *Op0Ty = IC.getOperand(0)->getType();
3131  Type *Op1Ty = IC.getOperand(1)->getType();
3132  Assert(Op0Ty == Op1Ty,
3133  "Both operands to ICmp instruction are not of the same type!", &IC);
3134  // Check that the operands are the right type
3135  Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3136  "Invalid operand types for ICmp instruction", &IC);
3137  // Check that the predicate is valid.
3138  Assert(IC.isIntPredicate(),
3139  "Invalid predicate in ICmp instruction!", &IC);
3140 
3141  visitInstruction(IC);
3142 }
3143 
3144 void Verifier::visitFCmpInst(FCmpInst &FC) {
3145  // Check that the operands are the same type
3146  Type *Op0Ty = FC.getOperand(0)->getType();
3147  Type *Op1Ty = FC.getOperand(1)->getType();
3148  Assert(Op0Ty == Op1Ty,
3149  "Both operands to FCmp instruction are not of the same type!", &FC);
3150  // Check that the operands are the right type
3151  Assert(Op0Ty->isFPOrFPVectorTy(),
3152  "Invalid operand types for FCmp instruction", &FC);
3153  // Check that the predicate is valid.
3154  Assert(FC.isFPPredicate(),
3155  "Invalid predicate in FCmp instruction!", &FC);
3156 
3157  visitInstruction(FC);
3158 }
3159 
3160 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3161  Assert(
3163  "Invalid extractelement operands!", &EI);
3164  visitInstruction(EI);
3165 }
3166 
3167 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3168  Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3169  IE.getOperand(2)),
3170  "Invalid insertelement operands!", &IE);
3171  visitInstruction(IE);
3172 }
3173 
3174 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3175  Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3176  SV.getOperand(2)),
3177  "Invalid shufflevector operands!", &SV);
3178  visitInstruction(SV);
3179 }
3180 
3181 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3182  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3183 
3184  Assert(isa<PointerType>(TargetTy),
3185  "GEP base pointer is not a vector or a vector of pointers", &GEP);
3186  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3187 
3188  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3189  Assert(all_of(
3190  Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3191  "GEP indexes must be integers", &GEP);
3192  Type *ElTy =
3193  GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3194  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3195 
3196  Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3197  GEP.getResultElementType() == ElTy,
3198  "GEP is not of right type for indices!", &GEP, ElTy);
3199 
3200  if (GEP.getType()->isVectorTy()) {
3201  // Additional checks for vector GEPs.
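 // For example (illustrative IR):
 //   getelementptr i32, <4 x i32*> %ptrs, <4 x i64> %offsets
 // produces a <4 x i32*> result, so the result width must match the width of
 // any vector operands checked below.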
3202  unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3203  if (GEP.getPointerOperandType()->isVectorTy())
3204  Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3205  "Vector GEP result width doesn't match operand's", &GEP);
3206  for (Value *Idx : Idxs) {
3207  Type *IndexTy = Idx->getType();
3208  if (IndexTy->isVectorTy()) {
3209  unsigned IndexWidth = IndexTy->getVectorNumElements();
3210  Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3211  }
3212  Assert(IndexTy->isIntOrIntVectorTy(),
3213  "All GEP indices should be of integer type");
3214  }
3215  }
3216 
3217  if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3218  Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3219  "GEP address space doesn't match type", &GEP);
3220  }
3221 
3222  visitInstruction(GEP);
3223 }
3224 
3225 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3226  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3227 }
3228 
3229 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3230  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3231  "precondition violation");
3232 
3233  unsigned NumOperands = Range->getNumOperands();
3234  Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3235  unsigned NumRanges = NumOperands / 2;
3236  Assert(NumRanges >= 1, "It should have at least one range!", Range);
3237 
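 // For example (illustrative IR): !{i32 0, i32 10, i32 20, i32 30} describes
 // the two half-open ranges [0, 10) and [20, 30); the pairs must not overlap,
 // touch, or be out of order, as checked below.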
3238  ConstantRange LastRange(1); // Dummy initial value
3239  for (unsigned i = 0; i < NumRanges; ++i) {
3240  ConstantInt *Low =
3241  mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3242  Assert(Low, "The lower limit must be an integer!", Low);
3243  ConstantInt *High =
3244  mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3245  Assert(High, "The upper limit must be an integer!", High);
3246  Assert(High->getType() == Low->getType() && High->getType() == Ty,
3247  "Range types must match instruction type!", &I);
3248 
3249  APInt HighV = High->getValue();
3250  APInt LowV = Low->getValue();
3251  ConstantRange CurRange(LowV, HighV);
3252  Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3253  "Range must not be empty!", Range);
3254  if (i != 0) {
3255  Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3256  "Intervals are overlapping", Range);
3257  Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3258  Range);
3259  Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3260  Range);
3261  }
3262  LastRange = ConstantRange(LowV, HighV);
3263  }
3264  if (NumRanges > 2) {
3265  APInt FirstLow =
3266  mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3267  APInt FirstHigh =
3268  mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3269  ConstantRange FirstRange(FirstLow, FirstHigh);
3270  Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3271  "Intervals are overlapping", Range);
3272  Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3273  Range);
3274  }
3275 }
3276 
3277 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3278  unsigned Size = DL.getTypeSizeInBits(Ty);
3279  Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3280  Assert(!(Size & (Size - 1)),
3281  "atomic memory access' operand must have a power-of-two size", Ty, I);
3282 }
3283 
3284 void Verifier::visitLoadInst(LoadInst &LI) {
3285  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3286  Assert(PTy, "Load operand must be a pointer.", &LI);
3287  Type *ElTy = LI.getType();
3288  Assert(LI.getAlignment() <= Value::MaximumAlignment,
3289  "huge alignment values are unsupported", &LI);
3290  Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3291  if (LI.isAtomic()) {
3294  "Load cannot have Release ordering", &LI);
3295  Assert(LI.getAlignment() != 0,
3296  "Atomic load must specify explicit alignment", &LI);
3297  Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3298  "atomic load operand must have integer, pointer, or floating point "
3299  "type!",
3300  ElTy, &LI);
3301  checkAtomicMemAccessSize(ElTy, &LI);
3302  } else {
3304  "Non-atomic load cannot have SynchronizationScope specified", &LI);
3305  }
3306 
3307  visitInstruction(LI);
3308 }
3309 
3310 void Verifier::visitStoreInst(StoreInst &SI) {
3311  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3312  Assert(PTy, "Store operand must be a pointer.", &SI);
3313  Type *ElTy = PTy->getElementType();
3314  Assert(ElTy == SI.getOperand(0)->getType(),
3315  "Stored value type does not match pointer operand type!", &SI, ElTy);
3317  "huge alignment values are unsupported", &SI);
3318  Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3319  if (SI.isAtomic()) {
3322  "Store cannot have Acquire ordering", &SI);
3323  Assert(SI.getAlignment() != 0,
3324  "Atomic store must specify explicit alignment", &SI);
3325  Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3326  "atomic store operand must have integer, pointer, or floating point "
3327  "type!",
3328  ElTy, &SI);
3329  checkAtomicMemAccessSize(ElTy, &SI);
3330  } else {
3332  "Non-atomic store cannot have SynchronizationScope specified", &SI);
3333  }
3334  visitInstruction(SI);
3335 }
3336 
3337 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
3338 void Verifier::verifySwiftErrorCall(CallBase &Call,
3339  const Value *SwiftErrorVal) {
3340  unsigned Idx = 0;
3341  for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
3342  if (*I == SwiftErrorVal) {
3343  Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
3344  "swifterror value when used in a callsite should be marked "
3345  "with swifterror attribute",
3346  SwiftErrorVal, Call);
3347  }
3348  }
3349 }
3350 
3351 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3352  // Check that swifterror value is only used by loads, stores, or as
3353  // a swifterror argument.
3354  for (const User *U : SwiftErrorVal->users()) {
3355  Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3356  isa<InvokeInst>(U),
3357  "swifterror value can only be loaded and stored from, or "
3358  "as a swifterror argument!",
3359  SwiftErrorVal, U);
3360  // If it is used by a store, check it is the second operand.
3361  if (auto StoreI = dyn_cast<StoreInst>(U))
3362  Assert(StoreI->getOperand(1) == SwiftErrorVal,
3363  "swifterror value should be the second operand when used "
3364  "by stores", SwiftErrorVal, U);
3365  if (auto *Call = dyn_cast<CallBase>(U))
3366  verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3367  }
3368 }
3369 
3370 void Verifier::visitAllocaInst(AllocaInst &AI) {
3371  SmallPtrSet<Type*, 4> Visited;
3372  PointerType *PTy = AI.getType();
3373  // TODO: Relax this restriction?
3375  "Allocation instruction pointer not in the stack address space!",
3376  &AI);
3377  Assert(AI.getAllocatedType()->isSized(&Visited),
3378  "Cannot allocate unsized type", &AI);
3380  "Alloca array size must have integer type", &AI);
3382  "huge alignment values are unsupported", &AI);
3383 
3384  if (AI.isSwiftError()) {
3385  verifySwiftErrorValue(&AI);
3386  }
3387 
3388  visitInstruction(AI);
3389 }
3390 
3391 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3392 
3393  // FIXME: more conditions???
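 // For example (illustrative): success/failure orderings of "acq_rel monotonic"
 // are accepted, while "monotonic acquire" is rejected because the failure
 // ordering would be stronger than the success ordering.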
3395  "cmpxchg instructions must be atomic.", &CXI);
3397  "cmpxchg instructions must be atomic.", &CXI);
3399  "cmpxchg instructions cannot be unordered.", &CXI);
3401  "cmpxchg instructions cannot be unordered.", &CXI);
3403  "cmpxchg instructions failure argument shall be no stronger than the "
3404  "success argument",
3405  &CXI);
3408  "cmpxchg failure ordering cannot include release semantics", &CXI);
3409 
3410  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3411  Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3412  Type *ElTy = PTy->getElementType();
3413  Assert(ElTy->isIntOrPtrTy(),
3414  "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3415  checkAtomicMemAccessSize(ElTy, &CXI);
3416  Assert(ElTy == CXI.getOperand(1)->getType(),
3417  "Expected value type does not match pointer operand type!", &CXI,
3418  ElTy);
3419  Assert(ElTy == CXI.getOperand(2)->getType(),
3420  "Stored value type does not match pointer operand type!", &CXI, ElTy);
3421  visitInstruction(CXI);
3422 }
3423 
3424 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3426  "atomicrmw instructions must be atomic.", &RMWI);
3428  "atomicrmw instructions cannot be unordered.", &RMWI);
3429  auto Op = RMWI.getOperation();
3430  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3431  Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3432  Type *ElTy = PTy->getElementType();
3433  if (Op == AtomicRMWInst::Xchg) {
3434  Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " +
3436  " operand must have integer or floating point type!",
3437  &RMWI, ElTy);
3438  } else if (AtomicRMWInst::isFPOperation(Op)) {
3439  Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
3441  " operand must have floating point type!",
3442  &RMWI, ElTy);
3443  } else {
3444  Assert(ElTy->isIntegerTy(), "atomicrmw " +
3446  " operand must have integer type!",
3447  &RMWI, ElTy);
3448  }
3449  checkAtomicMemAccessSize(ElTy, &RMWI);
3450  Assert(ElTy == RMWI.getOperand(1)->getType(),
3451  "Argument value type does not match pointer operand type!", &RMWI,
3452  ElTy);
3454  "Invalid binary operation!", &RMWI);
3455  visitInstruction(RMWI);
3456 }
3457 
3458 void Verifier::visitFenceInst(FenceInst &FI) {
3459  const AtomicOrdering Ordering = FI.getOrdering();
3460  Assert(Ordering == AtomicOrdering::Acquire ||
3461  Ordering == AtomicOrdering::Release ||
3462  Ordering == AtomicOrdering::AcquireRelease ||
3464  "fence instructions may only have acquire, release, acq_rel, or "
3465  "seq_cst ordering.",
3466  &FI);
3467  visitInstruction(FI);
3468 }
3469 
3470 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3471  Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3472  EVI.getIndices()) == EVI.getType(),
3473  "Invalid ExtractValueInst operands!", &EVI);
3474 
3475  visitInstruction(EVI);
3476 }
3477 
3478 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3479  Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3480  IVI.getIndices()) ==
3481  IVI.getOperand(1)->getType(),
3482  "Invalid InsertValueInst operands!", &IVI);
3483 
3484  visitInstruction(IVI);
3485 }
3486 
3487 static Value *getParentPad(Value *EHPad) {
3488  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3489  return FPI->getParentPad();
3490 
3491  return cast<CatchSwitchInst>(EHPad)->getParentPad();
3492 }
3493 
3494 void Verifier::visitEHPadPredecessors(Instruction &I) {
3495  assert(I.isEHPad());
3496 
3497  BasicBlock *BB = I.getParent();
3498  Function *F = BB->getParent();
3499 
3500  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3501 
3502  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3503  // The landingpad instruction defines its parent as a landing pad block. The
3504  // landing pad block may be branched to only by the unwind edge of an
3505  // invoke.
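 // For example (illustrative IR):
 //   invoke void @f() to label %normal unwind label %lpad
 // is the only kind of edge that may enter %lpad, whose first non-PHI
 // instruction is a landingpad.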
3506  for (BasicBlock *PredBB : predecessors(BB)) {
3507  const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3508  Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3509  "Block containing LandingPadInst must be jumped to "
3510  "only by the unwind edge of an invoke.",
3511  LPI);
3512  }
3513  return;
3514  }
3515  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3516  if (!pred_empty(BB))
3517  Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3518  "Block containg CatchPadInst must be jumped to "
3519  "only by its catchswitch.",
3520  CPI);
3521  Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3522  "Catchswitch cannot unwind to one of its catchpads",
3523  CPI->getCatchSwitch(), CPI);
3524  return;
3525  }
3526 
3527  // Verify that each pred has a legal terminator with a legal to/from EH
3528  // pad relationship.
3529  Instruction *ToPad = &I;
3530  Value *ToPadParent = getParentPad(ToPad);
3531  for (BasicBlock *PredBB : predecessors(BB)) {
3532  Instruction *TI = PredBB->getTerminator();
3533  Value *FromPad;
3534  if (auto *II = dyn_cast<InvokeInst>(TI)) {
3535  Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3536  "EH pad must be jumped to via an unwind edge", ToPad, II);
3537  if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3538  FromPad = Bundle->Inputs[0];
3539  else
3540  FromPad = ConstantTokenNone::get(II->getContext());
3541  } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3542  FromPad = CRI->getOperand(0);
3543  Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3544  } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3545  FromPad = CSI;
3546  } else {
3547  Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3548  }
3549 
3550  // The edge may exit from zero or more nested pads.
3551  SmallSet<Value *, 8> Seen;
3552  for (;; FromPad = getParentPad(FromPad)) {
3553  Assert(FromPad != ToPad,
3554  "EH pad cannot handle exceptions raised within it", FromPad, TI);
3555  if (FromPad == ToPadParent) {
3556  // This is a legal unwind edge.
3557  break;
3558  }
3559  Assert(!isa<ConstantTokenNone>(FromPad),
3560  "A single unwind edge may only enter one EH pad", TI);
3561  Assert(Seen.insert(FromPad).second,
3562  "EH pad jumps through a cycle of pads", FromPad);
3563  }
3564  }
3565 }
3566 
3567 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3568  // The landingpad instruction is ill-formed if it doesn't have any clauses and
3569  // isn't a cleanup.
3570  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3571  "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3572 
3573  visitEHPadPredecessors(LPI);
3574 
3575  if (!LandingPadResultTy)
3576  LandingPadResultTy = LPI.getType();
3577  else
3578  Assert(LandingPadResultTy == LPI.getType(),
3579  "The landingpad instruction should have a consistent result type "
3580  "inside a function.",
3581  &LPI);
3582 
3583  Function *F = LPI.getParent()->getParent();
3584  Assert(F->hasPersonalityFn(),
3585  "LandingPadInst needs to be in a function with a personality.", &LPI);
3586 
3587  // The landingpad instruction must be the first non-PHI instruction in the
3588  // block.
3589  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3590  "LandingPadInst not the first non-PHI instruction in the block.",
3591  &LPI);
3592 
3593  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3594  Constant *Clause = LPI.getClause(i);
3595  if (LPI.isCatch(i)) {
3596  Assert(isa<PointerType>(Clause->getType()),
3597  "Catch operand does not have pointer type!", &LPI);
3598  } else {
3599  Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3600  Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3601  "Filter operand is not an array of constants!", &LPI);
3602  }
3603  }
3604 
3605  visitInstruction(LPI);
3606 }
3607 
3608 void Verifier::visitResumeInst(ResumeInst &RI) {
3610  "ResumeInst needs to be in a function with a personality.", &RI);
3611 
3612  if (!LandingPadResultTy)
3613  LandingPadResultTy = RI.getValue()->getType();
3614  else
3615  Assert(LandingPadResultTy == RI.getValue()->getType(),
3616  "The resume instruction should have a consistent result type "
3617  "inside a function.",
3618  &RI);
3619 
3620  visitTerminator(RI);
3621 }
3622 
3623 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3624  BasicBlock *BB = CPI.getParent();
3625 
3626  Function *F = BB->getParent();
3627  Assert(F->hasPersonalityFn(),
3628  "CatchPadInst needs to be in a function with a personality.", &CPI);
3629 
3630  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3631  "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3632  CPI.getParentPad());
3633 
3634  // The catchpad instruction must be the first non-PHI instruction in the
3635  // block.
3636  Assert(BB->getFirstNonPHI() == &CPI,
3637  "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3638 
3639  visitEHPadPredecessors(CPI);
3640  visitFuncletPadInst(CPI);
3641 }
3642 
3643 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3644  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3645  "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3646  CatchReturn.getOperand(0));
3647 
3648  visitTerminator(CatchReturn);
3649 }
3650 
3651 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3652  BasicBlock *BB = CPI.getParent();
3653 
3654  Function *F = BB->getParent();
3655  Assert(F->hasPersonalityFn(),
3656  "CleanupPadInst needs to be in a function with a personality.", &CPI);
3657 
3658  // The cleanuppad instruction must be the first non-PHI instruction in the
3659  // block.
3660  Assert(BB->getFirstNonPHI() == &CPI,
3661  "CleanupPadInst not the first non-PHI instruction in the block.",
3662  &CPI);
3663 
3664  auto *ParentPad = CPI.getParentPad();
3665  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3666  "CleanupPadInst has an invalid parent.", &CPI);
3667 
3668  visitEHPadPredecessors(CPI);
3669  visitFuncletPadInst(CPI);
3670 }
3671 
3672 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3673  User *FirstUser = nullptr;
3674  Value *FirstUnwindPad = nullptr;
3675  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3676  SmallSet<FuncletPadInst *, 8> Seen;
3677 
3678  while (!Worklist.empty()) {
3679  FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3680  Assert(Seen.insert(CurrentPad).second,
3681  "FuncletPadInst must not be nested within itself", CurrentPad);
3682  Value *UnresolvedAncestorPad = nullptr;
3683  for (User *U : CurrentPad->users()) {
3684  BasicBlock *UnwindDest;
3685  if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3686  UnwindDest = CRI->getUnwindDest();
3687  } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3688  // We allow catchswitch unwind to caller to nest
3689  // within an outer pad that unwinds somewhere else,
3690  // because catchswitch doesn't have a nounwind variant.
3691  // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3692  if (CSI->unwindsToCaller())
3693  continue;
3694  UnwindDest = CSI->getUnwindDest();
3695  } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3696  UnwindDest = II->getUnwindDest();
3697  } else if (isa<CallInst>(U)) {
3698  // Calls which don't unwind may be found inside funclet
3699  // pads that unwind somewhere else. We don't *require*
3700  // such calls to be annotated nounwind.
3701  continue;
3702  } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3703  // The unwind dest for a cleanup can only be found by
3704  // recursive search. Add it to the worklist, and we'll
3705  // search for its first use that determines where it unwinds.
3706  Worklist.push_back(CPI);
3707  continue;
3708  } else {
3709  Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3710  continue;
3711  }
3712 
3713  Value *UnwindPad;
3714  bool ExitsFPI;
3715  if (UnwindDest) {
3716  UnwindPad = UnwindDest->getFirstNonPHI();
3717  if (!cast<Instruction>(UnwindPad)->isEHPad())
3718  continue;
3719  Value *UnwindParent = getParentPad(UnwindPad);
3720  // Ignore unwind edges that don't exit CurrentPad.
3721  if (UnwindParent == CurrentPad)
3722  continue;
3723  // Determine whether the original funclet pad is exited,
3724  // and if we are scanning nested pads determine how many
3725  // of them are exited so we can stop searching their
3726  // children.
3727  Value *ExitedPad = CurrentPad;
3728  ExitsFPI = false;
3729  do {
3730  if (ExitedPad == &FPI) {
3731  ExitsFPI = true;
3732  // Now we can resolve any ancestors of CurrentPad up to
3733  // FPI, but not including FPI since we need to make sure
3734  // to check all direct users of FPI for consistency.
3735  UnresolvedAncestorPad = &FPI;
3736  break;
3737  }
3738  Value *ExitedParent = getParentPad(ExitedPad);
3739  if (ExitedParent == UnwindParent) {
3740  // ExitedPad is the ancestor-most pad which this unwind
3741  // edge exits, so we can resolve up to it, meaning that
3742  // ExitedParent is the first ancestor still unresolved.
3743  UnresolvedAncestorPad = ExitedParent;
3744  break;
3745  }
3746  ExitedPad = ExitedParent;
3747  } while (!isa<ConstantTokenNone>(ExitedPad));
3748  } else {
3749  // Unwinding to caller exits all pads.
3750  UnwindPad = ConstantTokenNone::get(FPI.getContext());
3751  ExitsFPI = true;
3752  UnresolvedAncestorPad = &FPI;
3753  }
3754 
3755  if (ExitsFPI) {
3756  // This unwind edge exits FPI. Make sure it agrees with other
3757  // such edges.
3758  if (FirstUser) {
3759  Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3760  "pad must have the same unwind "
3761  "dest",
3762  &FPI, U, FirstUser);
3763  } else {
3764  FirstUser = U;
3765  FirstUnwindPad = UnwindPad;
3766  // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3767  if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3768  getParentPad(UnwindPad) == getParentPad(&FPI))
3769  SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
3770  }
3771  }
3772  // Make sure we visit all uses of FPI, but for nested pads stop as
3773  // soon as we know where they unwind to.
3774  if (CurrentPad != &FPI)
3775  break;
3776  }
3777  if (UnresolvedAncestorPad) {
3778  if (CurrentPad == UnresolvedAncestorPad) {
3779  // When CurrentPad is FPI itself, we don't mark it as resolved even if
3780  // we've found an unwind edge that exits it, because we need to verify
3781  // all direct uses of FPI.
3782  assert(CurrentPad == &FPI);
3783  continue;
3784  }
3785  // Pop off the worklist any nested pads that we've found an unwind
3786  // destination for. The pads on the worklist are the uncles,
3787  // great-uncles, etc. of CurrentPad. We've found an unwind destination
3788  // for all ancestors of CurrentPad up to but not including
3789  // UnresolvedAncestorPad.
3790  Value *ResolvedPad = CurrentPad;
3791  while (!Worklist.empty()) {
3792  Value *UnclePad = Worklist.back();
3793  Value *AncestorPad = getParentPad(UnclePad);
3794  // Walk ResolvedPad up the ancestor list until we either find the
3795  // uncle's parent or the last resolved ancestor.
3796  while (ResolvedPad != AncestorPad) {
3797  Value *ResolvedParent = getParentPad(ResolvedPad);
3798  if (ResolvedParent == UnresolvedAncestorPad) {
3799  break;
3800  }
3801  ResolvedPad = ResolvedParent;
3802  }
3803  // If the resolved ancestor search didn't find the uncle's parent,
3804  // then the uncle is not yet resolved.
3805  if (ResolvedPad != AncestorPad)
3806  break;
3807  // This uncle is resolved, so pop it from the worklist.
3808  Worklist.pop_back();
3809  }
3810  }
3811  }
3812 
3813  if (FirstUnwindPad) {
3814  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3815  BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3816  Value *SwitchUnwindPad;
3817  if (SwitchUnwindDest)
3818  SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3819  else
3820  SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3821  Assert(SwitchUnwindPad == FirstUnwindPad,
3822  "Unwind edges out of a catch must have the same unwind dest as "
3823  "the parent catchswitch",
3824  &FPI, FirstUser, CatchSwitch);
3825  }
3826  }
3827 
3828  visitInstruction(FPI);
3829 }
3830 
3831 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3832  BasicBlock *BB = CatchSwitch.getParent();
3833 
3834  Function *F = BB->getParent();
3835  Assert(F->hasPersonalityFn(),
3836  "CatchSwitchInst needs to be in a function with a personality.",
3837  &CatchSwitch);
3838 
3839  // The catchswitch instruction must be the first non-PHI instruction in the
3840  // block.
3841  Assert(BB->getFirstNonPHI() == &CatchSwitch,
3842  "CatchSwitchInst not the first non-PHI instruction in the block.",
3843  &CatchSwitch);
3844 
3845  auto *ParentPad = CatchSwitch.getParentPad();
3846  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3847  "CatchSwitchInst has an invalid parent.", ParentPad);
3848 
3849  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3850  Instruction *I = UnwindDest->getFirstNonPHI();
3851  Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3852  "CatchSwitchInst must unwind to an EH block which is not a "
3853  "landingpad.",
3854  &CatchSwitch);
3855 
3856  // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3857  if (getParentPad(I) == ParentPad)
3858  SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3859  }
3860 
3861  Assert(CatchSwitch.getNumHandlers() != 0,
3862  "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3863 
3864  for (BasicBlock *Handler : CatchSwitch.handlers()) {
3865  Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3866  "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3867  }
3868 
3869  visitEHPadPredecessors(CatchSwitch);
3870  visitTerminator(CatchSwitch);
3871 }
3872 
3873 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3874  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3875  "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3876  CRI.getOperand(0));
3877 
3878  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
3879  Instruction *I = UnwindDest->getFirstNonPHI();
3880  Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3881  "CleanupReturnInst must unwind to an EH block which is not a "
3882  "landingpad.",
3883  &CRI);
3884  }
3885 
3886  visitTerminator(CRI);
3887 }
3888 
3889 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
3890  Instruction *Op = cast<Instruction>(I.getOperand(i));
3891  // If we have an invalid invoke, don't try to compute the dominance.
3892  // We already reject it in the invoke specific checks and the dominance
3893  // computation doesn't handle multiple edges.
3894  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
3895  if (II->getNormalDest() == II->getUnwindDest())
3896  return;
3897  }
3898 
3899  // Quick check whether the def has already been encountered in the same block.
3900  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
3901  // uses are defined to happen on the incoming edge, not at the instruction.
3902  //
3903  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
3904  // wrapping an SSA value, assert that we've already encountered it. See
3905  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
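 // For example (illustrative IR): in "%p = phi i32 [ %x, %bb ]", the use of
 // %x is treated as occurring at the end of %bb, so %x does not need to
 // dominate the phi instruction itself.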
3906  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
3907  return;
3908 
3909  const Use &U = I.getOperandUse(i);
3910  Assert(DT.dominates(Op, U),
3911  "Instruction does not dominate all uses!", Op, &I);
3912 }
3913 
3914 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
3915  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
3916  "apply only to pointer types", &I);
3917  Assert(isa<LoadInst>(I),
3918  "dereferenceable, dereferenceable_or_null apply only to load"
3919  " instructions, use attributes for calls or invokes", &I);
3920  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
3921  "take one operand!", &I);
3922  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
3923  Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
3924  "dereferenceable_or_null metadata value must be an i64!", &I);
3925 }
3926 
3927 /// verifyInstruction - Verify that an instruction is well formed.
3928 ///
3929 void Verifier::visitInstruction(Instruction &I) {
3930  BasicBlock *BB = I.getParent();
3931  Assert(BB, "Instruction not embedded in basic block!", &I);
3932 
3933  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
3934  for (User *U : I.users()) {
3935  Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
3936  "Only PHI nodes may reference their own value!", &I);
3937  }
3938  }
3939 
3940  // Check that void typed values don't have names
3941  Assert(!I.getType()->isVoidTy() || !I.hasName(),
3942  "Instruction has a name, but provides a void value!", &I);
3943 
3944  // Check that the return value of the instruction is either void or a legal
3945  // value type.
3946  Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
3947  "Instruction returns a non-scalar type!", &I);
3948 
3949  // Check that the instruction doesn't produce metadata. Calls are already
3950  // checked against the callee type.
3951  Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
3952  "Invalid use of metadata!", &I);
3953 
3954  // Check that all uses of the instruction, if they are instructions
3955  // themselves, actually have parent basic blocks. If the use is not an
3956  // instruction, it is an error!
3957  for (Use &U : I.uses()) {
3958  if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
3959  Assert(Used->getParent() != nullptr,
3960  "Instruction referencing"
3961  " instruction not embedded in a basic block!",
3962  &I, Used);
3963  else {
3964  CheckFailed("Use of instruction is not an instruction!", U);
3965  return;
3966  }
3967  }
3968 
3969  // Get a pointer to the call base of the instruction if it is some form of
3970  // call.
3971  const CallBase *CBI = dyn_cast<CallBase>(&I);
3972 
3973  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
3974  Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
3975 
3976  // Check to make sure that only first-class-values are operands to
3977  // instructions.
3978  if (!I.getOperand(i)->getType()->isFirstClassType()) {
3979  Assert(false, "Instruction operands must be first-class values!", &I);
3980  }
3981 
3982  if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
3983  // Check to make sure that the "address of" an intrinsic function is never
3984  // taken.
3985  Assert(!F->isIntrinsic() ||
3986  (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
3987  "Cannot take the address of an intrinsic!", &I);
3988  Assert(
3989  !F->isIntrinsic() || isa<CallInst>(I) ||
3990  F->getIntrinsicID() == Intrinsic::donothing ||
3991  F->getIntrinsicID() == Intrinsic::coro_resume ||
3992  F->getIntrinsicID() == Intrinsic::coro_destroy ||
3993  F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
3994  F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
3995  F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint,
3996  "Cannot invoke an intrinsic other than donothing, patchpoint, "
3997  "statepoint, coro_resume or coro_destroy",
3998  &I);
3999  Assert(F->getParent() == &M, "Referencing function in another module!",
4000  &I, &M, F, F->getParent());
4001  } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4002  Assert(OpBB->getParent() == BB->getParent(),
4003  "Referring to a basic block in another function!", &I);
4004  } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4005  Assert(OpArg->getParent() == BB->getParent(),
4006  "Referring to an argument in another function!", &I);
4007  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4008  Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
4009  &M, GV, GV->getParent());
4010  } else if (isa<Instruction>(I.getOperand(i))) {
4011  verifyDominatesUse(I, i);
4012  } else if (isa<InlineAsm>(I.getOperand(i))) {
4013  Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4014  "Cannot take the address of an inline asm!", &I);
4015  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4016  if (CE->getType()->isPtrOrPtrVectorTy() ||
4017  !DL.getNonIntegralAddressSpaces().empty()) {
4018  // If we have a ConstantExpr pointer, we need to see if it came from an
4019  // illegal bitcast. If the datalayout string specifies non-integral
4020  // address spaces then we also need to check for illegal ptrtoint and
4021  // inttoptr expressions.
4022  visitConstantExprsRecursively(CE);
4023  }
4024  }
4025  }
4026 
4027  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
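 // For example (illustrative IR): "!fpmath !{float 2.5}" promises the result
 // is accurate to within 2.5 ULPs; the single operand must be a positive,
 // finite float constant, as checked below.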
4029  "fpmath requires a floating point result!", &I);
4030  Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4031  if (ConstantFP *CFP0 =
4032  mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4033  const APFloat &Accuracy = CFP0->getValueAPF();
4034  Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4035  "fpmath accuracy must have float type", &I);
4036  Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4037  "fpmath accuracy not a positive number!", &I);
4038  } else {
4039  Assert(false, "invalid fpmath accuracy!", &I);
4040  }
4041  }
4042 
4043  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4044  Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4045  "Ranges are only for loads, calls and invokes!", &I);
4046  visitRangeMetadata(I, Range, I.getType());
4047  }
4048 
4049  if (I.getMetadata(LLVMContext::MD_nonnull)) {
4050  Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4051  &I);
4052  Assert(isa<LoadInst>(I),
4053  "nonnull applies only to load instructions, use attributes"
4054  " for calls or invokes",
4055  &I);
4056  }
4057 
4058  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4059  visitDereferenceableMetadata(I, MD);
4060 
4061  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4062  visitDereferenceableMetadata(I, MD);
4063 
4064  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4065  TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4066 
4067  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
4068  Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4069  &I);
4070  Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4071  "use attributes for calls or invokes", &I);
4072  Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4073  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4074  Assert(CI && CI->getType()->isIntegerTy(64),
4075  "align metadata value must be an i64!", &I);
4076  uint64_t Align = CI->getZExtValue();
4077  Assert(isPowerOf2_64(Align),
4078  "align metadata value must be a power of 2!", &I);
4080  "alignment is larger that implementation defined limit", &I);
4081  }
4082 
4083  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4084  AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4085  visitMDNode(*N);
4086  }
4087 
4088  if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
4089  verifyFragmentExpression(*DII);
4090 
4091  InstsInThisBlock.insert(&I);
4092 }
4093 
4094 /// Allow intrinsics to be verified in different ways.
4095 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4096  Function *IF = Call.getCalledFunction();
4097  Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4098  IF);
4099 
4100  // Verify that the intrinsic prototype lines up with what the .td files
4101  // describe.
4102  FunctionType *IFTy = IF->getFunctionType();
4103  bool IsVarArg = IFTy->isVarArg();
4104 
4105  SmallVector<Intrinsic::IITDescriptor, 8> Table;
4106  getIntrinsicInfoTableEntries(ID, Table);
4107  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4108 
4109  SmallVector<Type *, 4> ArgTys;
4110  Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(),
4111  TableRef, ArgTys),
4112  "Intrinsic has incorrect return type!", IF);
4113  for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
4114  Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i),
4115  TableRef, ArgTys),
4116  "Intrinsic has incorrect argument type!", IF);
4117 
4118  // Verify if the intrinsic call matches the vararg property.
4119  if (IsVarArg)
4120  Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4121  "Intrinsic was not defined with variable arguments!", IF);
4122  else
4123  Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4124  "Callsite was not defined with variable arguments!", IF);
4125 
4126  // All descriptors should be absorbed by now.
4127  Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4128 
4129  // Now that we have the intrinsic ID and the actual argument types (and we
4130  // know they are legal for the intrinsic!) get the intrinsic name through the
4131  // usual means. This allows us to verify the mangling of argument types into
4132  // the name.
4133  const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4134  Assert(ExpectedName == IF->getName(),
4135  "Intrinsic name not mangled correctly for type arguments! "
4136  "Should be: " +
4137  ExpectedName,
4138  IF);
4139 
4140  // If the intrinsic takes MDNode arguments, verify that they are either global
4141  // or are local to *this* function.
4142  for (Value *V : Call.args())
4143  if (auto *MD = dyn_cast<MetadataAsValue>(V))
4144  visitMetadataAsValue(*MD, Call.getCaller());
4145 
4146  switch (ID) {
4147  default:
4148  break;
4149  case Intrinsic::coro_id: {
4150  auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4151  if (isa<ConstantPointerNull>(InfoArg))
4152  break;
4153  auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4154  Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4155  "info argument of llvm.coro.begin must refer to an initialized "
4156  "constant");
4157  Constant *Init = GV->getInitializer();
4158  Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4159  "info argument of llvm.coro.begin must refer to either a struct or "
4160  "an array");
4161  break;
4162  }
4163  case Intrinsic::ctlz: // llvm.ctlz
4164  case Intrinsic::cttz: // llvm.cttz
4165  Assert(isa<ConstantInt>(Call.getArgOperand(1)),
4166  "is_zero_undef argument of bit counting intrinsics must be a "
4167  "constant int",
4168  Call);
4169  break;
4170  case Intrinsic::experimental_constrained_fadd:
4171  case Intrinsic::experimental_constrained_fsub:
4172  case Intrinsic::experimental_constrained_fmul:
4173  case Intrinsic::experimental_constrained_fdiv:
4174  case Intrinsic::experimental_constrained_frem:
4175  case Intrinsic::experimental_constrained_fma:
4176  case Intrinsic::experimental_constrained_sqrt:
4177  case Intrinsic::experimental_constrained_pow:
4178  case Intrinsic::experimental_constrained_powi:
4179  case Intrinsic::experimental_constrained_sin:
4180  case Intrinsic::experimental_constrained_cos:
4181  case Intrinsic::experimental_constrained_exp:
4182  case Intrinsic::experimental_constrained_exp2:
4183  case Intrinsic::experimental_constrained_log:
4184  case Intrinsic::experimental_constrained_log10:
4185  case Intrinsic::experimental_constrained_log2:
4186  case Intrinsic::experimental_constrained_rint:
4187  case Intrinsic::experimental_constrained_nearbyint:
4188  case Intrinsic::experimental_constrained_maxnum:
4189  case Intrinsic::experimental_constrained_minnum:
4190  case Intrinsic::experimental_constrained_ceil:
4191  case Intrinsic::experimental_constrained_floor:
4192  case Intrinsic::experimental_constrained_round:
4193  case Intrinsic::experimental_constrained_trunc:
4194  visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4195  break;
4196  case Intrinsic::dbg_declare: // llvm.dbg.declare
4197  Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
4198  "invalid llvm.dbg.declare intrinsic call 1", Call);
4199  visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4200  break;
4201  case Intrinsic::dbg_addr: // llvm.dbg.addr
4202  visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4203  break;
4204  case Intrinsic::dbg_value: // llvm.dbg.value
4205  visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4206  break;
4207  case Intrinsic::dbg_label: // llvm.dbg.label
4208  visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4209  break;
4210  case Intrinsic::memcpy:
4211  case Intrinsic::memmove:
4212  case Intrinsic::memset: {
4213  const auto *MI = cast<MemIntrinsic>(&Call);
4214  auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4215  return Alignment == 0 || isPowerOf2_32(Alignment);
4216  };
4217  Assert(IsValidAlignment(MI->getDestAlignment()),
4218  "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4219  Call);
4220  if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4221  Assert(IsValidAlignment(MTI->getSourceAlignment()),
4222  "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4223  Call);
4224  }
4225  Assert(isa<ConstantInt>(Call.getArgOperand(3)),
4226  "isvolatile argument of memory intrinsics must be a constant int",
4227  Call);
4228  break;
4229  }
4230  case Intrinsic::memcpy_element_unordered_atomic:
4231  case Intrinsic::memmove_element_unordered_atomic:
4232  case Intrinsic::memset_element_unordered_atomic: {
4233  const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4234 
4235  ConstantInt *ElementSizeCI =
4236  dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4237  Assert(ElementSizeCI,
4238  "element size of the element-wise unordered atomic memory "
4239  "intrinsic must be a constant int",
4240  Call);
4241  const APInt &ElementSizeVal = ElementSizeCI->getValue();
4242  Assert(ElementSizeVal.isPowerOf2(),
4243  "element size of the element-wise atomic memory intrinsic "
4244  "must be a power of 2",
4245  Call);
4246 
4247  if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
4248  uint64_t Length = LengthCI->getZExtValue();
4249  uint64_t ElementSize = AMI->getElementSizeInBytes();
4250  Assert((Length % ElementSize) == 0,
4251  "constant length must be a multiple of the element size in the "
4252  "element-wise atomic memory intrinsic",
4253  Call);
4254  }
4255 
4256  auto IsValidAlignment = [&](uint64_t Alignment) {
4257  return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4258  };
4259  uint64_t DstAlignment = AMI->getDestAlignment();
4260  Assert(IsValidAlignment(DstAlignment),
4261  "incorrect alignment of the destination argument", Call);
4262  if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4263  uint64_t SrcAlignment = AMT->getSourceAlignment();
4264  Assert(IsValidAlignment(SrcAlignment),
4265  "incorrect alignment of the source argument", Call);
4266  }
4267  break;
4268  }
4269  case Intrinsic::gcroot:
4270  case Intrinsic::gcwrite:
4271  case Intrinsic::gcread:
4272  if (ID == Intrinsic::gcroot) {
4273  AllocaInst *AI =
4274  dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
4275  Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
4276  Assert(isa<Constant>(Call.getArgOperand(1)),
4277  "llvm.gcroot parameter #2 must be a constant.", Call);
4278  if (!AI->getAllocatedType()->isPointerTy()) {
4279  Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
4280  "llvm.gcroot parameter #1 must either be a pointer alloca, "
4281  "or argument #2 must be a non-null constant.",
4282  Call);
4283  }
4284  }
4285 
4286  Assert(Call.getParent()->getParent()->hasGC(),
4287  "Enclosing function does not use GC.", Call);
4288  break;
4289  case Intrinsic::init_trampoline:
4290  Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
4291  "llvm.init_trampoline parameter #2 must resolve to a function.",
4292  Call);
4293  break;
4294  case Intrinsic::prefetch:
4295  Assert(isa<ConstantInt>(Call.getArgOperand(1)) &&
4296  isa<ConstantInt>(Call.getArgOperand(2)) &&
4297  cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
4298  cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
4299  "invalid arguments to llvm.prefetch", Call);
4300  break;
4301  case Intrinsic::stackprotector:
4302  Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
4303  "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
4304  break;
4305  case Intrinsic::lifetime_start:
4306  case Intrinsic::lifetime_end:
4307  case Intrinsic::invariant_start:
4308  Assert(isa<ConstantInt>(Call.getArgOperand(0)),
4309  "size argument of memory use markers must be a constant integer",
4310  Call);
4311  break;
4312  case Intrinsic::invariant_end:
4313  Assert(isa<ConstantInt>(Call.getArgOperand(1)),
4314  "llvm.invariant.end parameter #2 must be a constant integer", Call);
4315  break;
4316 
4317  case Intrinsic::localescape: {
4318  BasicBlock *BB = Call.getParent();
4319  Assert(BB == &BB->getParent()->front(),
4320  "llvm.localescape used outside of entry block", Call);
4321  Assert(!SawFrameEscape,
4322  "multiple calls to llvm.localescape in one function", Call);
4323  for (Value *Arg : Call.args()) {
4324  if (isa<ConstantPointerNull>(Arg))
4325  continue; // Null values are allowed as placeholders.
4326  auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4327  Assert(AI && AI->isStaticAlloca(),
4328  "llvm.localescape only accepts static allocas", Call);
4329  }
4330  FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
4331  SawFrameEscape = true;
4332  break;
4333  }
4334  case Intrinsic::localrecover: {
4335  Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
4336  Function *Fn = dyn_cast<Function>(FnArg);
4337  Assert(Fn && !Fn->isDeclaration(),
4338  "llvm.localrecover first "
4339  "argument must be function defined in this module",
4340  Call);
4341  auto *IdxArg = dyn_cast<ConstantInt>(Call.getArgOperand(2));
4342  Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
4343  Call);
4344  auto &Entry = FrameEscapeInfo[Fn];
4345  Entry.second = unsigned(
4346  std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4347  break;
4348  }
4349 
4350  case Intrinsic::experimental_gc_statepoint:
4351  if (auto *CI = dyn_cast<CallInst>(&Call))
4352  Assert(!CI->isInlineAsm(),
4353  "gc.statepoint support for inline assembly unimplemented", CI);
4354  Assert(Call.getParent()->getParent()->hasGC(),
4355  "Enclosing function does not use GC.", Call);
4356 
4357  verifyStatepoint(Call);
4358  break;
4359  case Intrinsic::experimental_gc_result: {
4360  Assert(Call.getParent()->getParent()->hasGC(),
4361  "Enclosing function does not use GC.", Call);
4362  // Are we tied to a statepoint properly?
4363  const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
4364  const Function *StatepointFn =
4365  StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
4366  Assert(StatepointFn && StatepointFn->isDeclaration() &&
4367  StatepointFn->getIntrinsicID() ==
4368  Intrinsic::experimental_gc_statepoint,
4369  "gc.result operand #1 must be from a statepoint", Call,
4370  Call.getArgOperand(0));
4371 
4372  // Assert that result type matches wrapped callee.
4373  const Value *Target = StatepointCall->getArgOperand(2);
4374  auto *PT = cast<PointerType>(Target->getType());
4375  auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4376  Assert(Call.getType() == TargetFuncType->getReturnType(),
4377  "gc.result result type does not match wrapped callee", Call);
4378  break;
4379  }
4380  case Intrinsic::experimental_gc_relocate: {
4381  Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
4382 
4383  Assert(isa<PointerType>(Call.getType()->getScalarType()),
4384  "gc.relocate must return a pointer or a vector of pointers", Call);
4385 
4386  // Check that this relocate is correctly tied to the statepoint
4387 
4388  // This is case for relocate on the unwinding path of an invoke statepoint
4389  if (LandingPadInst *LandingPad =
4390  dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
4391 
4392  const BasicBlock *InvokeBB =
4393  LandingPad->getParent()->getUniquePredecessor();
4394 
4395  // Landingpad relocates should have only one predecessor with invoke
4396  // statepoint terminator
4397  Assert(InvokeBB, "safepoints should have unique landingpads",
4398  LandingPad->getParent());
4399  Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4400  InvokeBB);
4401  Assert(isStatepoint(InvokeBB->getTerminator()),
4402  "gc relocate should be linked to a statepoint", InvokeBB);
4403  } else {
4404  // In all other cases relocate should be tied to the statepoint directly.
4405  // This covers relocates on a normal return path of invoke statepoint and
4406  // relocates of a call statepoint.
4407  auto Token = Call.getArgOperand(0);
4408  Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4409  "gc relocate is incorrectly tied to the statepoint", Call, Token);
4410  }
4411 
4412  // Verify rest of the relocate arguments.
4413  const CallBase &StatepointCall =
4414  *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
4415 
4416  // Both the base and derived must be piped through the safepoint.
4417  Value *Base = Call.getArgOperand(1);
4418  Assert(isa<ConstantInt>(Base),
4419  "gc.relocate operand #2 must be integer offset", Call);
4420 
4421  Value *Derived = Call.getArgOperand(2);
4422  Assert(isa<ConstantInt>(Derived),
4423  "gc.relocate operand #3 must be integer offset", Call);
4424 
4425  const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4426  const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4427  // Check the bounds
4428  Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
4429  "gc.relocate: statepoint base index out of bounds", Call);
4430  Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
4431  "gc.relocate: statepoint derived index out of bounds", Call);
4432 
4433  // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4434  // section of the statepoint's argument.
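 // The index arithmetic below assumes the statepoint operand layout
 // (illustrative): id, #patch bytes, target, #call args, flags, call args...,
 // #transition args, transition args..., #deopt args, deopt args...,
 // gc pointers...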
4435  Assert(StatepointCall.arg_size() > 0,
4436  "gc.statepoint: insufficient arguments");
4437  Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
4438  "gc.statement: number of call arguments must be constant integer");
4439  const unsigned NumCallArgs =
4440  cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
4441  Assert(StatepointCall.arg_size() > NumCallArgs + 5,
4442  "gc.statepoint: mismatch in number of call arguments");
4443  Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
4444  "gc.statepoint: number of transition arguments must be "
4445  "a constant integer");
4446  const int NumTransitionArgs =
4447  cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
4448  ->getZExtValue();
4449  const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4450  Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
4451  "gc.statepoint: number of deoptimization arguments must be "
4452  "a constant integer");
4453  const int NumDeoptArgs =
4454  cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
4455  ->getZExtValue();
4456  const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4457  const int GCParamArgsEnd = StatepointCall.arg_size();
4458  Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4459  "gc.relocate: statepoint base index doesn't fall within the "
4460  "'gc parameters' section of the statepoint call",
4461  Call);
4462  Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4463  "gc.relocate: statepoint derived index doesn't fall within the "
4464  "'gc parameters' section of the statepoint call",
4465  Call);
4466 
4467  // Relocated value must be either a pointer type or vector-of-pointer type,
4468  // but gc_relocate does not need to return the same pointer type as the
4469  // relocated pointer. It can be cast to the correct type later if desired.
4470  // However, they must have the same address space and 'vectorness'.
4471  GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
4472  Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4473  "gc.relocate: relocated value must be a gc pointer", Call);
4474 
4475  auto ResultType = Call.getType();
4476  auto DerivedType = Relocate.getDerivedPtr()->getType();
4477  Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4478  "gc.relocate: vector relocates to vector and pointer to pointer",
4479  Call);
4480  Assert(
4481  ResultType->getPointerAddressSpace() ==
4482  DerivedType->getPointerAddressSpace(),
4483  "gc.relocate: relocating a pointer shouldn't change its address space",
4484  Call);
4485  break;
4486  }
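The index arithmetic above follows the documented gc.statepoint operand layout. A minimal sketch of that layout and of a relocate tied to it; the callee, value names, and mangling suffixes below are illustrative, not taken from this file:

// gc.statepoint operands, as assumed by the index checks above:
//   [0] i64 id, [1] i32 num_patch_bytes, [2] callee, [3] i32 num_call_args,
//   [4] i32 flags, [5 ...] call args, i32 num_transition_args, transition args,
//   i32 num_deopt_args, deopt args, and finally the 'gc parameters' section.
//
//   %tok = call token (i64, i32, void ()*, i32, i32, ...)
//            @llvm.experimental.gc.statepoint.p0f_isVoidf(
//                i64 0, i32 0, void ()* @f, i32 0, i32 0, i32 0, i32 0,
//                i8 addrspace(1)* %obj)
//   ; %obj sits at statepoint index 7, so base and derived indices are both 7.
//   %rel = call i8 addrspace(1)*
//            @llvm.experimental.gc.relocate.p1i8(token %tok, i32 7, i32 7)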
4487  case Intrinsic::eh_exceptioncode:
4488  case Intrinsic::eh_exceptionpointer: {
4489  Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
4490  "eh.exceptionpointer argument must be a catchpad", Call);
4491  break;
4492  }
4493  case Intrinsic::masked_load: {
4494  Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
4495  Call);
4496 
4497  Value *Ptr = Call.getArgOperand(0);
4498  // Value *Alignment = Call.getArgOperand(1);
4499  Value *Mask = Call.getArgOperand(2);
4500  Value *PassThru = Call.getArgOperand(3);
4501  Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
4502  Call);
4503 
4504  // DataTy is the overloaded type
4505  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4506  Assert(DataTy == Call.getType(),
4507  "masked_load: return must match pointer type", Call);
4508  Assert(PassThru->getType() == DataTy,
4509  "masked_load: pass through and data type must match", Call);
4510  Assert(Mask->getType()->getVectorNumElements() ==
4511  DataTy->getVectorNumElements(),
4512  "masked_load: vector mask must be same length as data", Call);
4513  break;
4514  }
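A call that satisfies these checks looks like the following sketch; the vector width, element type, and alignment are illustrative:

//   %r = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
//            <8 x i32>* %ptr, i32 4, <8 x i1> %mask, <8 x i32> %passthru)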
4515  case Intrinsic::masked_store: {
4516  Value *Val = Call.getArgOperand(0);
4517  Value *Ptr = Call.getArgOperand(1);
4518  // Value *Alignment = Call.getArgOperand(2);
4519  Value *Mask = Call.getArgOperand(3);
4520  Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
4521  Call);
4522 
4523  // DataTy is the overloaded type
4524  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4525  Assert(DataTy == Val->getType(),
4526  "masked_store: storee must match pointer type", Call);
4527  Assert(Mask->getType()->getVectorNumElements() ==
4528  DataTy->getVectorNumElements(),
4529  "masked_store: vector mask must be same length as data", Call);
4530  break;
4531  }
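The store-side counterpart, again with illustrative types: the stored value matches the pointee type and the mask length matches the data length:

//   call void @llvm.masked.store.v8i32.p0v8i32(
//            <8 x i32> %val, <8 x i32>* %ptr, i32 4, <8 x i1> %mask)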
4532 
4533  case Intrinsic::experimental_guard: {
4534  Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
4535  Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4536  "experimental_guard must have exactly one "
4537  "\"deopt\" operand bundle");
4538  break;
4539  }
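A well-formed guard is a call (never an invoke) carrying exactly one "deopt" bundle; the bundle contents below are illustrative:

//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"(i32 %state) ]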
4540 
4541  case Intrinsic::experimental_deoptimize: {
4542  Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
4543  Call);
4544  Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4545  "experimental_deoptimize must have exactly one "
4546  "\"deopt\" operand bundle");
4547  Assert(Call.getType() == Call.getFunction()->getReturnType(),
4548  "experimental_deoptimize return type must match caller return type");
4549 
4550  if (isa<CallInst>(Call)) {
4551  auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
4552  Assert(RI,
4553  "calls to experimental_deoptimize must be followed by a return");
4554 
4555  if (!Call.getType()->isVoidTy() && RI)
4556  Assert(RI->getReturnValue() == &Call,
4557  "calls to experimental_deoptimize must be followed by a return "
4558  "of the value computed by experimental_deoptimize");
4559  }
4560 
4561  break;
4562  }
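A sketch of the shape these checks accept: a call with one "deopt" bundle whose result is immediately returned. The i32 types and bundle contents are illustrative:

//   %v = call i32 (...) @llvm.experimental.deoptimize.i32(i32 %arg) [ "deopt"(i32 %state) ]
//   ret i32 %v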
4563  case Intrinsic::sadd_sat:
4564  case Intrinsic::uadd_sat:
4565  case Intrinsic::ssub_sat:
4566  case Intrinsic::usub_sat: {
4567  Value *Op1 = Call.getArgOperand(0);
4568  Value *Op2 = Call.getArgOperand(1);
4569  Assert(Op1->getType()->isIntOrIntVectorTy(),
4570  "first operand of [us][add|sub]_sat must be an int type or vector "
4571  "of ints");
4572  Assert(Op2->getType()->isIntOrIntVectorTy(),
4573  "second operand of [us][add|sub]_sat must be an int type or vector "
4574  "of ints");
4575  break;
4576  }
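For reference, the saturating forms clamp rather than wrap; a worked sketch at i8 width:

//   call i8 @llvm.uadd.sat.i8(i8 250, i8 10)    ; 255 (clamped), not 4
//   call i8 @llvm.ssub.sat.i8(i8 -120, i8 20)   ; -128 (clamped), not 116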
4577  case Intrinsic::smul_fix: {
4578  Value *Op1 = Call.getArgOperand(0);
4579  Value *Op2 = Call.getArgOperand(1);
4580  Assert(Op1->getType()->isIntOrIntVectorTy(),
4581  "first operand of smul_fix must be an int type or vector "
4582  "of ints");
4583  Assert(Op2->getType()->isIntOrIntVectorTy(),
4584  "second operand of smul_fix must be an int type or vector "
4585  "of ints");
4586 
4587  auto *Op3 = dyn_cast<ConstantInt>(Call.getArgOperand(2));
4588  Assert(Op3, "third argument of smul_fix must be a constant integer");
4589  Assert(Op3->getType()->getBitWidth() <= 32,
4590  "third argument of smul_fix must fit within 32 bits");
4591  Assert(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4592  "the scale of smul_fix must be less than the width of the operands");
4593  break;
4594  }
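A worked sketch of the scale check: with scale 1, the operands carry one fractional bit, so the product is shifted right by 1:

//   call i32 @llvm.smul.fix.i32(i32 6, i32 4, i32 1)   ; (6 * 4) >> 1 = 12
//   ; 6 and 4 encode 3.0 and 2.0 at scale 1; 12 encodes their product 6.0.
//   ; A scale of 32 or more would be rejected by the checks above.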
4595  };
4596 }
4597 
4598 /// Carefully grab the subprogram from a local scope.
4599 ///
4600 /// This carefully grabs the subprogram from a local scope, avoiding the
4601 /// built-in assertions that would typically fire.
4602 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4603  if (!LocalScope)
4604  return nullptr;
4605 
4606  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4607  return SP;
4608 
4609  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4610  return getSubprogram(LB->getRawScope());
4611 
4612  // Just return null; broken scope chains are checked elsewhere.
4613  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4614  return nullptr;
4615 }
4616 
4617 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4618  unsigned NumOperands = FPI.getNumArgOperands();
4619  Assert(((NumOperands == 5 && FPI.isTernaryOp()) ||
4620  (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
4621  "invalid arguments for constrained FP intrinsic", &FPI);
4622  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
4623  "invalid exception behavior argument", &FPI);
4624  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
4625  "invalid rounding mode argument", &FPI);
4627  "invalid rounding mode argument", &FPI);
4629  "invalid exception behavior argument", &FPI);
4630 }
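A call shaped to pass these checks, with the two trailing metadata operands selecting the rounding mode and exception behavior; the value names are illustrative:

//   %s = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")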
4631 
4632 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
4633  auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
4634  AssertDI(isa<ValueAsMetadata>(MD) ||
4635  (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
4636  "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
4637  AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
4638  "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
4639  DII.getRawVariable());
4640  AssertDI(isa<DIExpression>(DII.getRawExpression()),
4641  "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
4642  DII.getRawExpression());
4643 
4644  // Ignore broken !dbg attachments; they're checked elsewhere.
4645  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
4646  if (!isa<DILocation>(N))
4647  return;
4648 
4649  BasicBlock *BB = DII.getParent();
4650  Function *F = BB ? BB->getParent() : nullptr;
4651 
4652  // The scopes for variables and !dbg attachments must agree.
4653  DILocalVariable *Var = DII.getVariable();
4654  DILocation *Loc = DII.getDebugLoc();
4655  AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4656  &DII, BB, F);
4657 
4658  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
4659  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4660  if (!VarSP || !LocSP)
4661  return; // Broken scope chains are checked elsewhere.
4662 
4663  AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4664  " variable and !dbg attachment",
4665  &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
4666  Loc->getScope()->getSubprogram());
4667 
4668  // This check is redundant with one in visitLocalVariable().
4669  AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
4670  Var->getRawType());
4671  if (auto *Type = dyn_cast_or_null<DIType>(Var->getRawType()))
4672  if (Type->isBlockByrefStruct())
4674  "BlockByRef variable without complex expression", Var, &DII);
4675 
4676  verifyFnArgs(DII);
4677 }
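A sketch of the shape this accepts, assuming illustrative metadata numbers: !12 is a DILocalVariable, the empty DIExpression is the expression operand, and !13 is a DILocation whose scope resolves to the same DISubprogram as the variable's:

//   call void @llvm.dbg.value(metadata i32 %x, metadata !12,
//                             metadata !DIExpression()), !dbg !13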
4678 
4679 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
4680  AssertDI(isa<DILabel>(DLI.getRawLabel()),
4681  "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
4682  DLI.getRawLabel());
4683 
4684  // Ignore broken !dbg attachments; they're checked elsewhere.
4685  if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
4686  if (!isa<DILocation>(N))
4687  return;
4688 
4689  BasicBlock *BB = DLI.getParent();
4690  Function *F = BB ? BB->getParent() : nullptr;
4691 
4692  // The scopes for variables and !dbg attachments must agree.
4693  DILabel *Label = DLI.getLabel();
4694  DILocation *Loc = DLI.getDebugLoc();
4695  Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4696  &DLI, BB, F);
4697 
4698  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
4699  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4700  if (!LabelSP || !LocSP)
4701  return;
4702 
4703  AssertDI(LabelSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4704  " label and !dbg attachment",
4705  &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
4706  Loc->getScope()->getSubprogram());
4707 }
4708 
4709 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
4710  DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4711  DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4712 
4713  // We don't know whether this intrinsic verified correctly.
4714  if (!V || !E || !E->isValid())
4715  return;
4716 
4717  // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4718  auto Fragment = E->getFragmentInfo();
4719  if (!Fragment)
4720  return;
4721 
4722  // The frontend helps out GDB by emitting the members of local anonymous
4723  // unions as artificial local variables with shared storage. When SROA splits
4724  // the storage for artificial local variables that are smaller than the entire
4725  // union, the overhang piece will be outside of the allotted space for the
4726  // variable and this check fails.
4727  // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4728  if (V->isArtificial())
4729  return;
4730 
4731  verifyFragmentExpression(*V, *Fragment, &I);
4732 }
4733 
4734 template <typename ValueOrMetadata>
4735 void Verifier::verifyFragmentExpression(const DIVariable &V,
4736  DIExpression::FragmentInfo Fragment,
4737  ValueOrMetadata *Desc) {
4738  // If there's no size, the type is broken, but that should be checked
4739  // elsewhere.
4740  auto VarSize = V.getSizeInBits();
4741  if (!VarSize)
4742  return;
4743 
4744  unsigned FragSize = Fragment.SizeInBits;
4745  unsigned FragOffset = Fragment.OffsetInBits;
4746  AssertDI(FragSize + FragOffset <= *VarSize,
4747  "fragment is larger than or outside of variable", Desc, &V);
4748  AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
4749 }
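A worked sketch for a 64-bit variable, showing which fragment expressions the two assertions accept and reject:

//   !DIExpression(DW_OP_LLVM_fragment, 0, 32)   ; ok: 0 + 32 <= 64 and 32 != 64
//   !DIExpression(DW_OP_LLVM_fragment, 48, 32)  ; rejected: 48 + 32 > 64
//   !DIExpression(DW_OP_LLVM_fragment, 0, 64)   ; rejected: covers the entire variable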
4750 
4751 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
4752  // This function does not take the scope of noninlined function arguments into
4753  // account. Don't run it if current function is nodebug, because it may
4754  // contain inlined debug intrinsics.
4755  if (!HasDebugInfo)
4756  return;
4757 
4758  // For performance reasons only check non-inlined ones.
4759  if (I.getDebugLoc()->getInlinedAt())
4760  return;
4761 
4762  DILocalVariable *Var = I.getVariable();
4763  AssertDI(Var, "dbg intrinsic without variable");
4764 
4765  unsigned ArgNo = Var->getArg();
4766  if (!ArgNo)
4767  return;
4768 
4769  // Verify there are no duplicate function argument debug info entries.
4770  // These will cause hard-to-debug assertions in the DWARF backend.
4771  if (DebugFnArgs.size() < ArgNo)
4772  DebugFnArgs.resize(ArgNo, nullptr);
4773 
4774  auto *Prev = DebugFnArgs[ArgNo - 1];
4775  DebugFnArgs[ArgNo - 1] = Var;
4776  AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
4777  Prev, Var);
4778 }
4779 
4780 void Verifier::verifyCompileUnits() {
4781  // When more than one Module is imported into the same context, such as during
4782  // an LTO build before linking the modules, ODR type uniquing may cause types
4783  // to point to a different CU. This check does not make sense in this case.
4784  if (M.getContext().isODRUniquingDebugTypes())
4785  return;
4786  auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
4787  SmallPtrSet<const Metadata *, 2> Listed;
4788  if (CUs)
4789  Listed.insert(CUs->op_begin(), CUs->op_end());
4790  for (auto *CU : CUVisited)
4791  AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
4792  CUVisited.clear();
4793 }
4794 
4795 void Verifier::verifyDeoptimizeCallingConvs() {
4796  if (DeoptimizeDeclarations.empty())
4797  return;
4798 
4799  const Function *First = DeoptimizeDeclarations[0];
4800  for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
4801  Assert(First->getCallingConv() == F->getCallingConv(),
4802  "All llvm.experimental.deoptimize declarations must have the same "
4803  "calling convention",
4804  First, F);
4805  }
4806 }
4807 
4808 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
4809  bool HasSource = F.getSource().hasValue();
4810  if (!HasSourceDebugInfo.count(&U))
4811  HasSourceDebugInfo[&U] = HasSource;
4812  AssertDI(HasSource == HasSourceDebugInfo[&U],
4813  "inconsistent use of embedded source");
4814 }
4815 
4816 //===----------------------------------------------------------------------===//
4817 // Implement the public interfaces to this file...
4818 //===----------------------------------------------------------------------===//
4819 
4820 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
4821  Function &F = const_cast<Function &>(f);
4822 
4823  // Don't use a raw_null_ostream. Printing IR is expensive.
4824  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
4825 
4826  // Note that this function's return value is inverted from what you would
4827  // expect of a function called "verify".
4828  return !V.verify(F);
4829 }
4830 
4831 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
4832  bool *BrokenDebugInfo) {
4833  // Don't use a raw_null_ostream. Printing IR is expensive.
4834  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
4835 
4836  bool Broken = false;
4837  for (const Function &F : M)
4838  Broken |= !V.verify(F);
4839 
4840  Broken |= !V.verify();
4841  if (BrokenDebugInfo)
4842  *BrokenDebugInfo = V.hasBrokenDebugInfo();
4843  // Note that this function's return value is inverted from what you would
4844  // expect of a function called "verify".
4845  return Broken;
4846 }
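A minimal sketch of how a client might drive these entry points; the wrapper name and the choice of errs() are assumptions, not part of this file:

#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

// Returns true if M is well formed. Broken debug info is reported separately
// so a client can choose to strip it rather than reject the whole module.
static bool checkModule(const llvm::Module &M) {
  bool BrokenDebugInfo = false;
  // Remember: verifyModule() returns true when the module is broken.
  if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
    return false;
  if (BrokenDebugInfo)
    llvm::errs() << "warning: module carries broken debug info\n";
  return true;
}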
4847 
4848 namespace {
4849 
4850 struct VerifierLegacyPass : public FunctionPass {
4851  static char ID;
4852 
4853  std::unique_ptr<Verifier> V;
4854  bool FatalErrors = true;
4855 
4856  VerifierLegacyPass() : FunctionPass(ID) {
4857  initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4858  }
4859  explicit VerifierLegacyPass(bool FatalErrors)
4860  : FunctionPass(ID),
4861  FatalErrors(FatalErrors) {
4862  initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4863  }
4864 
4865  bool doInitialization(Module &M) override {
4866  V = llvm::make_unique<Verifier>(
4867  &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
4868  return false;
4869  }
4870 
4871  bool runOnFunction(Function &F) override {
4872  if (!V->verify(F) && FatalErrors) {
4873  errs() << "in function " << F.getName() << '\n';
4874  report_fatal_error("Broken function found, compilation aborted!");
4875  }
4876  return false;
4877  }
4878 
4879  bool doFinalization(Module &M) override {
4880  bool HasErrors = false;
4881  for (Function &F : M)
4882  if (F.isDeclaration())
4883  HasErrors |= !V->verify(F);
4884 
4885  HasErrors |= !V->verify();
4886  if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
4887  report_fatal_error("Broken module found, compilation aborted!");
4888  return false;
4889  }
4890 
4891  void getAnalysisUsage(AnalysisUsage &AU) const override {
4892  AU.setPreservesAll();
4893  }
4894 };
4895 
4896 } // end anonymous namespace
4897 
4898 /// Helper to issue failure from the TBAA verification
4899 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
4900  if (Diagnostic)
4901  return Diagnostic->CheckFailed(Args...);
4902 }
4903 
4904 #define AssertTBAA(C, ...) \
4905  do { \
4906  if (!(C)) { \
4907  CheckFailed(__VA_ARGS__); \
4908  return false; \
4909  } \
4910  } while (false)
4911 
4912 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
4913 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a
4914 /// struct-type node describing an aggregate data structure (like a struct).
4915 TBAAVerifier::TBAABaseNodeSummary
4916 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
4917  bool IsNewFormat) {
4918  if (BaseNode->getNumOperands() < 2) {
4919  CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
4920  return {true, ~0u};
4921  }
4922 
4923  auto Itr = TBAABaseNodes.find(BaseNode);
4924  if (Itr != TBAABaseNodes.end())
4925  return Itr->second;
4926 
4927  auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
4928  auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
4929  (void)InsertResult;
4930  assert(InsertResult.second && "We just checked!");
4931  return Result;
4932 }
4933 
4934 TBAAVerifier::TBAABaseNodeSummary
4935 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
4936  bool IsNewFormat) {
4937  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
4938 
4939  if (BaseNode->getNumOperands() == 2) {
4940  // Scalar nodes can only be accessed at offset 0.
4941  return isValidScalarTBAANode(BaseNode)
4942  ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
4943  : InvalidNode;
4944  }
4945 
4946  if (IsNewFormat) {
4947  if (BaseNode->getNumOperands() % 3 != 0) {
4948  CheckFailed("Access tag nodes must have the number of operands that is a "
4949  "multiple of 3!", BaseNode);
4950  return InvalidNode;
4951  }
4952  } else {
4953  if (BaseNode->getNumOperands() % 2 != 1) {
4954  CheckFailed("Struct tag nodes must have an odd number of operands!",
4955  BaseNode);
4956  return InvalidNode;
4957  }
4958  }
4959 
4960  // Check the type size field.
4961  if (IsNewFormat) {
4962  auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
4963  BaseNode->getOperand(1));
4964  if (!TypeSizeNode) {
4965  CheckFailed("Type size nodes must be constants!", &I, BaseNode);
4966  return InvalidNode;
4967  }
4968  }
4969 
4970  // Check the type name field. In the new format it can be anything.
4971  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
4972  CheckFailed("Struct tag nodes have a string as their first operand",
4973  BaseNode);
4974  return InvalidNode;
4975  }
4976 
4977  bool Failed = false;
4978 
4979  Optional<APInt> PrevOffset;
4980  unsigned BitWidth = ~0u;
4981 
4982  // We've already checked that BaseNode is not a degenerate root node with one
4983  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
4984  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
4985  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
4986  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
4987  Idx += NumOpsPerField) {
4988  const MDOperand &FieldTy = BaseNode->getOperand(Idx);
4989  const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
4990  if (!isa<MDNode>(FieldTy)) {
4991  CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
4992  Failed = true;
4993  continue;
4994  }
4995 
4996  auto *OffsetEntryCI =
4997  mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
4998  if (!OffsetEntryCI) {
4999  CheckFailed("Offset entries must be constants!", &I, BaseNode);
5000  Failed = true;
5001  continue;
5002  }
5003 
5004  if (BitWidth == ~0u)
5005  BitWidth = OffsetEntryCI->getBitWidth();
5006 
5007  if (OffsetEntryCI->getBitWidth() != BitWidth) {
5008  CheckFailed(
5009  "Bitwidth between the offsets and struct type entries must match", &I,
5010  BaseNode);
5011  Failed = true;
5012  continue;
5013  }
5014 
5015  // NB! As far as I can tell, we generate a non-strictly increasing offset
5016  // sequence only from structs that have zero size bit fields. When
5017  // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
5018  // pick the field lexically the latest in struct type metadata node. This
5019  // mirrors the actual behavior of the alias analysis implementation.
5020  bool IsAscending =
5021  !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
5022 
5023  if (!IsAscending) {
5024  CheckFailed("Offsets must be increasing!", &I, BaseNode);
5025  Failed = true;
5026  }
5027 
5028  PrevOffset = OffsetEntryCI->getValue();
5029 
5030  if (IsNewFormat) {
5031  auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5032  BaseNode->getOperand(Idx + 2));
5033  if (!MemberSizeNode) {
5034  CheckFailed("Member size entries must be constants!", &I, BaseNode);
5035  Failed = true;
5036  continue;
5037  }
5038  }
5039  }
5040 
5041  return Failed ? InvalidNode
5042  : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
5043 }
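An old-format struct-path example that these checks accept; the metadata numbers and type names are illustrative:

//   !0 = !{!"Simple C/C++ TBAA"}              ; root (single operand)
//   !1 = !{!"omnipotent char", !0, i64 0}     ; scalar node
//   !2 = !{!"int", !1, i64 0}                 ; scalar node
//   !3 = !{!"S", !2, i64 0, !2, i64 4}        ; struct node: int fields at 0 and 4
//   ; !3 has an odd operand count, a string first operand, and constant offsets
//   ; of one bit width in non-decreasing order, so it yields a valid summary.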
5044 
5045 static bool IsRootTBAANode(const MDNode *MD) {
5046  return MD->getNumOperands() < 2;
5047 }
5048 
5049 static bool IsScalarTBAANodeImpl(const MDNode *MD,
5050  SmallPtrSetImpl<const MDNode *> &Visited) {
5051  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
5052  return false;
5053 
5054  if (!isa<MDString>(MD->getOperand(0)))
5055  return false;
5056 
5057  if (MD->getNumOperands() == 3) {
5058  auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
5059  if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
5060  return false;
5061  }
5062 
5063  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5064  return Parent && Visited.insert(Parent).second &&
5065  (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
5066 }
5067 
5068 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
5069  auto ResultIt = TBAAScalarNodes.find(MD);
5070  if (ResultIt != TBAAScalarNodes.end())
5071  return ResultIt->second;
5072 
5073  SmallPtrSet<const MDNode *, 4> Visited;
5074  bool Result = IsScalarTBAANodeImpl(MD, Visited);
5075  auto InsertResult = TBAAScalarNodes.insert({MD, Result});
5076  (void)InsertResult;
5077  assert(InsertResult.second && "Just checked!");
5078 
5079  return Result;
5080 }
5081 
5082 /// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
5083 /// Offset in place to be the offset within the field node returned.
5084 ///
5085 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
5086 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
5087  const MDNode *BaseNode,
5088  APInt &Offset,
5089  bool IsNewFormat) {
5090  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
5091 
5092  // Scalar nodes have only one possible "field" -- their parent in the access
5093  // hierarchy. Offset must be zero at this point, but our caller is supposed
5094  // to Assert that.
5095  if (BaseNode->getNumOperands() == 2)
5096  return cast<MDNode>(BaseNode->getOperand(1));
5097 
5098  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5099  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5100  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5101  Idx += NumOpsPerField) {
5102  auto *OffsetEntryCI =
5103  mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
5104  if (OffsetEntryCI->getValue().ugt(Offset)) {
5105  if (Idx == FirstFieldOpNo) {
5106  CheckFailed("Could not find TBAA parent in struct type node", &I,
5107  BaseNode, &Offset);
5108  return nullptr;
5109  }
5110 
5111  unsigned PrevIdx = Idx - NumOpsPerField;
5112  auto *PrevOffsetEntryCI =
5113  mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
5114  Offset -= PrevOffsetEntryCI->getValue();
5115  return cast<MDNode>(BaseNode->getOperand(PrevIdx));
5116  }
5117  }
5118 
5119  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
5120  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
5121  BaseNode->getOperand(LastIdx + 1));
5122  Offset -= LastOffsetEntryCI->getValue();
5123  return cast<MDNode>(BaseNode->getOperand(LastIdx));
5124 }
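A worked sketch of the lookup, assuming a hypothetical struct node with fields at offsets 0 and 4:

//   struct node fields: (type !A, offset 0), (type !B, offset 4)
//   Offset == 6  ->  returns !B, Offset becomes 6 - 4 == 2
//   Offset == 1  ->  returns !A, Offset stays 1 - 0 == 1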
5125 
5126 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
5127  if (!Type || Type->getNumOperands() < 3)
5128  return false;
5129 
5130  // In the new format type nodes shall have a reference to the parent type as
5131  // its first operand.
5132  MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
5133  if (!Parent)
5134  return false;
5135 
5136  return true;
5137 }
5138 
5139 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
5140  AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5141  isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
5142  isa<AtomicCmpXchgInst>(I),
5143  "This instruction shall not have a TBAA access tag!", &I);
5144 
5145  bool IsStructPathTBAA =
5146  isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
5147 
5148  AssertTBAA(
5149  IsStructPathTBAA,
5150  "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
5151 
5152  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
5153  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5154 
5155  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
5156 
5157  if (IsNewFormat) {
5158  AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
5159  "Access tag metadata must have either 4 or 5 operands", &I, MD);
5160  } else {
5161  AssertTBAA(MD->getNumOperands() < 5,
5162  "Struct tag metadata must have either 3 or 4 operands", &I, MD);
5163  }
5164 
5165  // Check the access size field.
5166  if (IsNewFormat) {
5167  auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5168  MD->getOperand(3));
5169  AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
5170  }
5171 
5172  // Check the immutability flag.
5173  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
5174  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
5175  auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
5176  MD->getOperand(ImmutabilityFlagOpNo));
5177  AssertTBAA(IsImmutableCI,
5178  "Immutability tag on struct tag metadata must be a constant",
5179  &I, MD);
5180  AssertTBAA(
5181  IsImmutableCI->isZero() || IsImmutableCI->isOne(),
5182  "Immutability part of the struct tag metadata must be either 0 or 1",
5183  &I, MD);
5184  }
5185 
5186  AssertTBAA(BaseNode && AccessType,
5187  "Malformed struct tag metadata: base and access-type "
5188  "should be non-null and point to Metadata nodes",
5189  &I, MD, BaseNode, AccessType);
5190 
5191  if (!IsNewFormat) {
5192  AssertTBAA(isValidScalarTBAANode(AccessType),
5193  "Access type node must be a valid scalar type", &I, MD,
5194  AccessType);
5195  }
5196 
5197  auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
5198  AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
5199 
5200  APInt Offset = OffsetCI->getValue();
5201  bool SeenAccessTypeInPath = false;
5202 
5203  SmallPtrSet<MDNode *, 4> StructPath;
5204 
5205  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
5206  BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,