LLVM  14.0.0git
ThreadSanitizer.cpp
Go to the documentation of this file.
1 //===-- ThreadSanitizer.cpp - race detector -------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer, a race detector.
10 //
11 // The tool is under development, for the details about previous versions see
12 // http://code.google.com/p/data-race-test
13 //
14 // The instrumentation phase is quite simple:
15 // - Insert calls to run-time library before every memory access.
16 // - Optimizations may apply to avoid instrumenting some of the accesses.
17 // - Insert calls at function entry/exit.
18 // The rest is handled by the run-time library.
19 //===----------------------------------------------------------------------===//
20 
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/Optional.h"
24 #include "llvm/ADT/SmallString.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/LLVMContext.h"
38 #include "llvm/IR/Metadata.h"
39 #include "llvm/IR/Module.h"
40 #include "llvm/IR/Type.h"
41 #include "llvm/InitializePasses.h"
44 #include "llvm/Support/Debug.h"
52 
53 using namespace llvm;
54 
55 #define DEBUG_TYPE "tsan"
56 
58  "tsan-instrument-memory-accesses", cl::init(true),
59  cl::desc("Instrument memory accesses"), cl::Hidden);
60 static cl::opt<bool>
61  ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
62  cl::desc("Instrument function entry and exit"),
63  cl::Hidden);
65  "tsan-handle-cxx-exceptions", cl::init(true),
66  cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
67  cl::Hidden);
68 static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
69  cl::init(true),
70  cl::desc("Instrument atomics"),
71  cl::Hidden);
73  "tsan-instrument-memintrinsics", cl::init(true),
74  cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
76  "tsan-distinguish-volatile", cl::init(false),
77  cl::desc("Emit special instrumentation for accesses to volatiles"),
78  cl::Hidden);
80  "tsan-instrument-read-before-write", cl::init(false),
81  cl::desc("Do not eliminate read instrumentation for read-before-writes"),
82  cl::Hidden);
84  "tsan-compound-read-before-write", cl::init(false),
85  cl::desc("Emit special compound instrumentation for reads-before-writes"),
86  cl::Hidden);
87 
88 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
89 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
90 STATISTIC(NumOmittedReadsBeforeWrite,
91  "Number of reads ignored due to following writes");
92 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
93 STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
94 STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
95 STATISTIC(NumOmittedReadsFromConstantGlobals,
96  "Number of reads from constant globals");
97 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
98 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
99 
// Name of the module constructor this pass synthesizes; sanitizeFunction
// skips instrumenting it so __tsan_init itself is never instrumented.
const char kTsanModuleCtorName[] = "tsan.module_ctor";
// Runtime initialization entry point that the module constructor calls.
const char kTsanInitName[] = "__tsan_init";
102 
103 namespace {
104 
105 /// ThreadSanitizer: instrument the code in module to find races.
106 ///
107 /// Instantiating ThreadSanitizer inserts the tsan runtime library API function
108 /// declarations into the module if they don't exist already. Instantiating
109 /// ensures the __tsan_init function is in the list of global constructors for
110 /// the module.
111 struct ThreadSanitizer {
112  ThreadSanitizer() {
113  // Check options and warn user.
115  errs()
116  << "warning: Option -tsan-compound-read-before-write has no effect "
117  "when -tsan-instrument-read-before-write is set.\n";
118  }
119  }
120 
121  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);
122 
123 private:
124  // Internal Instruction wrapper that contains more information about the
125  // Instruction from prior analysis.
126  struct InstructionInfo {
127  // Instrumentation emitted for this instruction is for a compounded set of
128  // read and write operations in the same basic block.
129  static constexpr unsigned kCompoundRW = (1U << 0);
130 
131  explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}
132 
133  Instruction *Inst;
134  unsigned Flags = 0;
135  };
136 
137  void initialize(Module &M);
138  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
139  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
140  bool instrumentMemIntrinsic(Instruction *I);
141  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
143  const DataLayout &DL);
144  bool addrPointsToConstantData(Value *Addr);
145  int getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr, const DataLayout &DL);
146  void InsertRuntimeIgnores(Function &F);
147 
148  Type *IntptrTy;
149  FunctionCallee TsanFuncEntry;
150  FunctionCallee TsanFuncExit;
151  FunctionCallee TsanIgnoreBegin;
152  FunctionCallee TsanIgnoreEnd;
153  // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
154  static const size_t kNumberOfAccessSizes = 5;
157  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
158  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
159  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
160  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
161  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
162  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
163  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
164  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
165  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
166  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
167  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
169  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
170  FunctionCallee TsanAtomicThreadFence;
171  FunctionCallee TsanAtomicSignalFence;
172  FunctionCallee TsanVptrUpdate;
173  FunctionCallee TsanVptrLoad;
174  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
175 };
176 
177 struct ThreadSanitizerLegacyPass : FunctionPass {
178  ThreadSanitizerLegacyPass() : FunctionPass(ID) {
180  }
181  StringRef getPassName() const override;
182  void getAnalysisUsage(AnalysisUsage &AU) const override;
183  bool runOnFunction(Function &F) override;
184  bool doInitialization(Module &M) override;
185  static char ID; // Pass identification, replacement for typeid.
186 private:
188 };
189 
190 void insertModuleCtor(Module &M) {
192  M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
193  /*InitArgs=*/{},
194  // This callback is invoked when the functions are created the first
195  // time. Hook them into the global ctors list in that case:
196  [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
197 }
198 
199 } // namespace
200 
203  ThreadSanitizer TSan;
204  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
205  return PreservedAnalyses::none();
206  return PreservedAnalyses::all();
207 }
208 
211  insertModuleCtor(M);
212  return PreservedAnalyses::none();
213 }
214 
216 INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
217  "ThreadSanitizer: detects data races.", false, false)
219 INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
220  "ThreadSanitizer: detects data races.", false, false)
221 
// Human-readable pass name (shown by e.g. -debug-pass and -time-passes).
StringRef ThreadSanitizerLegacyPass::getPassName() const {
  return "ThreadSanitizerLegacyPass";
}
225 
226 void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
228 }
229 
// Module-level setup for the legacy pass: register the tsan module ctor and
// construct the per-module ThreadSanitizer helper used by runOnFunction.
bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
  insertModuleCtor(M);
  TSan.emplace();
  return true; // Module was modified (ctor added).
}
235 
237  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
238  TSan->sanitizeFunction(F, TLI);
239  return true;
240 }
241 
243  return new ThreadSanitizerLegacyPass();
244 }
245 
247  const DataLayout &DL = M.getDataLayout();
248  IntptrTy = DL.getIntPtrType(M.getContext());
249 
250  IRBuilder<> IRB(M.getContext());
251  AttributeList Attr;
252  Attr = Attr.addFnAttribute(M.getContext(), Attribute::NoUnwind);
253  // Initialize the callbacks.
254  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
255  IRB.getVoidTy(), IRB.getInt8PtrTy());
256  TsanFuncExit =
257  M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
258  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
259  IRB.getVoidTy());
260  TsanIgnoreEnd =
261  M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
262  IntegerType *OrdTy = IRB.getInt32Ty();
263  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
264  const unsigned ByteSize = 1U << i;
265  const unsigned BitSize = ByteSize * 8;
266  std::string ByteSizeStr = utostr(ByteSize);
267  std::string BitSizeStr = utostr(BitSize);
268  SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
269  TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
270  IRB.getInt8PtrTy());
271 
272  SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
273  TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
274  IRB.getInt8PtrTy());
275 
276  SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
277  TsanUnalignedRead[i] = M.getOrInsertFunction(
278  UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
279 
280  SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
281  TsanUnalignedWrite[i] = M.getOrInsertFunction(
282  UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
283 
284  SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
285  TsanVolatileRead[i] = M.getOrInsertFunction(
286  VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
287 
288  SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
289  TsanVolatileWrite[i] = M.getOrInsertFunction(
290  VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
291 
292  SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
293  ByteSizeStr);
294  TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
295  UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
296 
297  SmallString<64> UnalignedVolatileWriteName(
298  "__tsan_unaligned_volatile_write" + ByteSizeStr);
299  TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
300  UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
301 
302  SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
303  TsanCompoundRW[i] = M.getOrInsertFunction(
304  CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
305 
306  SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
307  ByteSizeStr);
308  TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
309  UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
310 
311  Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
312  Type *PtrTy = Ty->getPointerTo();
313  SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
314  {
315  AttributeList AL = Attr;
316  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
317  TsanAtomicLoad[i] =
318  M.getOrInsertFunction(AtomicLoadName, AL, Ty, PtrTy, OrdTy);
319  }
320 
321  SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
322  {
323  AttributeList AL = Attr;
324  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
325  AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
326  TsanAtomicStore[i] = M.getOrInsertFunction(
327  AtomicStoreName, AL, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
328  }
329 
330  for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
332  TsanAtomicRMW[Op][i] = nullptr;
333  const char *NamePart = nullptr;
334  if (Op == AtomicRMWInst::Xchg)
335  NamePart = "_exchange";
336  else if (Op == AtomicRMWInst::Add)
337  NamePart = "_fetch_add";
338  else if (Op == AtomicRMWInst::Sub)
339  NamePart = "_fetch_sub";
340  else if (Op == AtomicRMWInst::And)
341  NamePart = "_fetch_and";
342  else if (Op == AtomicRMWInst::Or)
343  NamePart = "_fetch_or";
344  else if (Op == AtomicRMWInst::Xor)
345  NamePart = "_fetch_xor";
346  else if (Op == AtomicRMWInst::Nand)
347  NamePart = "_fetch_nand";
348  else
349  continue;
350  SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
351  {
352  AttributeList AL = Attr;
353  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
354  AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
355  TsanAtomicRMW[Op][i] =
356  M.getOrInsertFunction(RMWName, AL, Ty, PtrTy, Ty, OrdTy);
357  }
358  }
359 
360  SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
361  "_compare_exchange_val");
362  {
363  AttributeList AL = Attr;
364  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
365  AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
366  AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
367  AL = AL.addParamAttribute(M.getContext(), 4, Attribute::ZExt);
368  TsanAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, AL, Ty, PtrTy, Ty,
369  Ty, OrdTy, OrdTy);
370  }
371  }
372  TsanVptrUpdate =
373  M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
374  IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
375  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
376  IRB.getVoidTy(), IRB.getInt8PtrTy());
377  {
378  AttributeList AL = Attr;
379  AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
380  TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
381  AL, IRB.getVoidTy(), OrdTy);
382  }
383  {
384  AttributeList AL = Attr;
385  AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
386  TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
387  AL, IRB.getVoidTy(), OrdTy);
388  }
389 
390  MemmoveFn =
391  M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
392  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
393  MemcpyFn =
394  M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
395  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
396  MemsetFn =
397  M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
398  IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
399 }
400 
401 static bool isVtableAccess(Instruction *I) {
402  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
403  return Tag->isTBAAVtableAccess();
404  return false;
405 }
406 
407 // Do not instrument known races/"benign races" that come from compiler
408 // instrumentatin. The user has no way of suppressing them.
410  // Peel off GEPs and BitCasts.
411  Addr = Addr->stripInBoundsOffsets();
412 
413  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
414  if (GV->hasSection()) {
415  StringRef SectionName = GV->getSection();
416  // Check if the global is in the PGO counters section.
417  auto OF = Triple(M->getTargetTriple()).getObjectFormat();
418  if (SectionName.endswith(
419  getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
420  return false;
421  }
422 
423  // Check if the global is private gcov data.
424  if (GV->getName().startswith("__llvm_gcov") ||
425  GV->getName().startswith("__llvm_gcda"))
426  return false;
427  }
428 
429  // Do not instrument acesses from different address spaces; we cannot deal
430  // with them.
431  if (Addr) {
432  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
433  if (PtrTy->getPointerAddressSpace() != 0)
434  return false;
435  }
436 
437  return true;
438 }
439 
440 bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
441  // If this is a GEP, just analyze its pointer operand.
442  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
443  Addr = GEP->getPointerOperand();
444 
445  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
446  if (GV->isConstant()) {
447  // Reads from constant globals can not race with any writes.
448  NumOmittedReadsFromConstantGlobals++;
449  return true;
450  }
451  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
452  if (isVtableAccess(L)) {
453  // Reads from a vtable pointer can not race with any writes.
454  NumOmittedReadsFromVtable++;
455  return true;
456  }
457  }
458  return false;
459 }
460 
461 // Instrumenting some of the accesses may be proven redundant.
462 // Currently handled:
463 // - read-before-write (within same BB, no calls between)
464 // - not captured variables
465 //
466 // We do not handle some of the patterns that should not survive
467 // after the classic compiler optimizations.
468 // E.g. two reads from the same temp should be eliminated by CSE,
469 // two writes should be eliminated by DSE, etc.
470 //
471 // 'Local' is a vector of insns within the same BB (no calls between).
472 // 'All' is a vector of insns that will be instrumented.
473 void ThreadSanitizer::chooseInstructionsToInstrument(
476  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
477  // Iterate from the end.
478  for (Instruction *I : reverse(Local)) {
479  const bool IsWrite = isa<StoreInst>(*I);
480  Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
481  : cast<LoadInst>(I)->getPointerOperand();
482 
483  if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
484  continue;
485 
486  if (!IsWrite) {
487  const auto WriteEntry = WriteTargets.find(Addr);
488  if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
489  auto &WI = All[WriteEntry->second];
490  // If we distinguish volatile accesses and if either the read or write
491  // is volatile, do not omit any instrumentation.
492  const bool AnyVolatile =
493  ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
494  cast<StoreInst>(WI.Inst)->isVolatile());
495  if (!AnyVolatile) {
496  // We will write to this temp, so no reason to analyze the read.
497  // Mark the write instruction as compound.
498  WI.Flags |= InstructionInfo::kCompoundRW;
499  NumOmittedReadsBeforeWrite++;
500  continue;
501  }
502  }
503 
504  if (addrPointsToConstantData(Addr)) {
505  // Addr points to some constant data -- it can not race with any writes.
506  continue;
507  }
508  }
509 
510  if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
511  !PointerMayBeCaptured(Addr, true, true)) {
512  // The variable is addressable but not captured, so it cannot be
513  // referenced from a different thread and participate in a data race
514  // (see llvm/Analysis/CaptureTracking.h for details).
515  NumOmittedNonCaptured++;
516  continue;
517  }
518 
519  // Instrument this instruction.
520  All.emplace_back(I);
521  if (IsWrite) {
522  // For read-before-write and compound instrumentation we only need one
523  // write target, and we can override any previous entry if it exists.
524  WriteTargets[Addr] = All.size() - 1;
525  }
526  }
527  Local.clear();
528 }
529 
530 static bool isAtomic(Instruction *I) {
531  // TODO: Ask TTI whether synchronization scope is between threads.
532  if (LoadInst *LI = dyn_cast<LoadInst>(I))
533  return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
534  if (StoreInst *SI = dyn_cast<StoreInst>(I))
535  return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
536  if (isa<AtomicRMWInst>(I))
537  return true;
538  if (isa<AtomicCmpXchgInst>(I))
539  return true;
540  if (isa<FenceInst>(I))
541  return true;
542  return false;
543 }
544 
// Bracket the whole function with __tsan_ignore_thread_begin/_end so the
// runtime ignores memory accesses performed while executing it. Used below
// for functions marked "sanitize_thread_no_checking_at_run_time".
void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  // EscapeEnumerator visits every exit point (including unwind paths when
  // ClHandleCxxExceptions is set), so the ignore region is always closed.
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}
553 
554 bool ThreadSanitizer::sanitizeFunction(Function &F,
555  const TargetLibraryInfo &TLI) {
556  // This is required to prevent instrumenting call to __tsan_init from within
557  // the module constructor.
558  if (F.getName() == kTsanModuleCtorName)
559  return false;
560  // Naked functions can not have prologue/epilogue
561  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them at
562  // all.
563  if (F.hasFnAttribute(Attribute::Naked))
564  return false;
565 
566  // __attribute__(disable_sanitizer_instrumentation) prevents all kinds of
567  // instrumentation.
568  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
569  return false;
570 
571  initialize(*F.getParent());
572  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
573  SmallVector<Instruction*, 8> LocalLoadsAndStores;
574  SmallVector<Instruction*, 8> AtomicAccesses;
575  SmallVector<Instruction*, 8> MemIntrinCalls;
576  bool Res = false;
577  bool HasCalls = false;
578  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
579  const DataLayout &DL = F.getParent()->getDataLayout();
580 
581  // Traverse all instructions, collect loads/stores/returns, check for calls.
582  for (auto &BB : F) {
583  for (auto &Inst : BB) {
584  if (isAtomic(&Inst))
585  AtomicAccesses.push_back(&Inst);
586  else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
587  LocalLoadsAndStores.push_back(&Inst);
588  else if ((isa<CallInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst)) ||
589  isa<InvokeInst>(Inst)) {
590  if (CallInst *CI = dyn_cast<CallInst>(&Inst))
592  if (isa<MemIntrinsic>(Inst))
593  MemIntrinCalls.push_back(&Inst);
594  HasCalls = true;
595  chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
596  DL);
597  }
598  }
599  chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
600  }
601 
602  // We have collected all loads and stores.
603  // FIXME: many of these accesses do not need to be checked for races
604  // (e.g. variables that do not escape, etc).
605 
606  // Instrument memory accesses only if we want to report bugs in the function.
607  if (ClInstrumentMemoryAccesses && SanitizeFunction)
608  for (const auto &II : AllLoadsAndStores) {
609  Res |= instrumentLoadOrStore(II, DL);
610  }
611 
612  // Instrument atomic memory accesses in any case (they can be used to
613  // implement synchronization).
615  for (auto Inst : AtomicAccesses) {
616  Res |= instrumentAtomic(Inst, DL);
617  }
618 
619  if (ClInstrumentMemIntrinsics && SanitizeFunction)
620  for (auto Inst : MemIntrinCalls) {
621  Res |= instrumentMemIntrinsic(Inst);
622  }
623 
624  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
625  assert(!F.hasFnAttribute(Attribute::SanitizeThread));
626  if (HasCalls)
627  InsertRuntimeIgnores(F);
628  }
629 
630  // Instrument function entry/exit points if there were instrumented accesses.
631  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
632  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
633  Value *ReturnAddress = IRB.CreateCall(
634  Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
635  IRB.getInt32(0));
636  IRB.CreateCall(TsanFuncEntry, ReturnAddress);
637 
638  EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
639  while (IRBuilder<> *AtExit = EE.Next()) {
640  AtExit->CreateCall(TsanFuncExit, {});
641  }
642  Res = true;
643  }
644  return Res;
645 }
646 
// Emit the runtime call for one plain (non-atomic) load or store, choosing
// among the read/write, volatile, compound, and unaligned hook variants.
// Returns true if a call was emitted.
bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
  Type *OrigTy = getLoadStoreType(II.Inst);

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  // Idx selects the per-size callback arrays; negative means unusual size.
  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
  if (Idx < 0)
    return false;
  // Vtable pointer updates/reads get dedicated hooks so the runtime can treat
  // them as "benign" object-construction races.
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << " VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }

  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
                                     : cast<LoadInst>(II.Inst)->getAlignment();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  // Compound instrumentation only arises from read-omission, and that path
  // bails out for volatile accesses, so the two flags are mutually exclusive.
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  // Alignment 0 means "ABI alignment of the type", which is sufficient; an
  // explicit alignment is fine if it is >= 8 or a multiple of the store size.
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  // A compound access counts as both a read and a write.
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}
723 
725  uint32_t v = 0;
726  switch (ord) {
728  llvm_unreachable("unexpected atomic ordering!");
730  case AtomicOrdering::Monotonic: v = 0; break;
731  // Not specified yet:
732  // case AtomicOrdering::Consume: v = 1; break;
733  case AtomicOrdering::Acquire: v = 2; break;
734  case AtomicOrdering::Release: v = 3; break;
735  case AtomicOrdering::AcquireRelease: v = 4; break;
736  case AtomicOrdering::SequentiallyConsistent: v = 5; break;
737  }
738  return IRB->getInt32(v);
739 }
740 
741 // If a memset intrinsic gets inlined by the code gen, we will miss races on it.
742 // So, we either need to ensure the intrinsic is not inlined, or instrument it.
743 // We do not instrument memset/memmove/memcpy intrinsics (too complicated),
744 // instead we simply replace them with regular function calls, which are then
745 // intercepted by the run-time.
746 // Since tsan is running after everyone else, the calls should not be
747 // replaced back with intrinsics. If that becomes wrong at some point,
748 // we will need to call e.g. __tsan_memset to avoid the intrinsics.
749 bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
750  IRBuilder<> IRB(I);
751  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
752  IRB.CreateCall(
753  MemsetFn,
754  {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
755  IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
756  IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
757  I->eraseFromParent();
758  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
759  IRB.CreateCall(
760  isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
761  {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
762  IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
763  IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
764  I->eraseFromParent();
765  }
766  return false;
767 }
768 
769 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
770 // standards. For background see C++11 standard. A slightly older, publicly
771 // available draft of the standard (not entirely up-to-date, but close enough
772 // for casual browsing) is available here:
773 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
774 // The following page contains more background information:
775 // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
776 
777 bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
778  IRBuilder<> IRB(I);
779  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
780  Value *Addr = LI->getPointerOperand();
781  Type *OrigTy = LI->getType();
782  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
783  if (Idx < 0)
784  return false;
785  const unsigned ByteSize = 1U << Idx;
786  const unsigned BitSize = ByteSize * 8;
787  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
788  Type *PtrTy = Ty->getPointerTo();
789  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
790  createOrdering(&IRB, LI->getOrdering())};
791  Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
792  Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
793  I->replaceAllUsesWith(Cast);
794  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
795  Value *Addr = SI->getPointerOperand();
796  int Idx =
797  getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
798  if (Idx < 0)
799  return false;
800  const unsigned ByteSize = 1U << Idx;
801  const unsigned BitSize = ByteSize * 8;
802  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
803  Type *PtrTy = Ty->getPointerTo();
804  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
805  IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
806  createOrdering(&IRB, SI->getOrdering())};
807  CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
809  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
810  Value *Addr = RMWI->getPointerOperand();
811  int Idx =
812  getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
813  if (Idx < 0)
814  return false;
815  FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
816  if (!F)
817  return false;
818  const unsigned ByteSize = 1U << Idx;
819  const unsigned BitSize = ByteSize * 8;
820  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
821  Type *PtrTy = Ty->getPointerTo();
822  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
823  IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
824  createOrdering(&IRB, RMWI->getOrdering())};
827  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
828  Value *Addr = CASI->getPointerOperand();
829  Type *OrigOldValTy = CASI->getNewValOperand()->getType();
830  int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
831  if (Idx < 0)
832  return false;
833  const unsigned ByteSize = 1U << Idx;
834  const unsigned BitSize = ByteSize * 8;
835  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
836  Type *PtrTy = Ty->getPointerTo();
837  Value *CmpOperand =
838  IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
839  Value *NewOperand =
840  IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
841  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
842  CmpOperand,
843  NewOperand,
844  createOrdering(&IRB, CASI->getSuccessOrdering()),
845  createOrdering(&IRB, CASI->getFailureOrdering())};
846  CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
847  Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
848  Value *OldVal = C;
849  if (Ty != OrigOldValTy) {
850  // The value is a pointer, so we need to cast the return value.
851  OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
852  }
853 
854  Value *Res =
855  IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
856  Res = IRB.CreateInsertValue(Res, Success, 1);
857 
858  I->replaceAllUsesWith(Res);
859  I->eraseFromParent();
860  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
861  Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
862  FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
863  ? TsanAtomicSignalFence
864  : TsanAtomicThreadFence;
867  }
868  return true;
869 }
870 
871 int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
872  const DataLayout &DL) {
873  assert(OrigTy->isSized());
874  assert(
875  cast<PointerType>(Addr->getType())->isOpaqueOrPointeeTypeMatches(OrigTy));
876  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
877  if (TypeSize != 8 && TypeSize != 16 &&
878  TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
879  NumAccessesWithBadSize++;
880  // Ignore all unusual sizes.
881  return -1;
882  }
883  size_t Idx = countTrailingZeros(TypeSize / 8);
885  return Idx;
886 }
i
i
Definition: README.txt:29
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
Instrumentation.h
llvm::createThreadSanitizerLegacyPassPass
FunctionPass * createThreadSanitizerLegacyPassPass()
Definition: ThreadSanitizer.cpp:242
llvm::AtomicOrdering::AcquireRelease
@ AcquireRelease
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:263
MathExtras.h
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:22
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::AArch64CC::AL
@ AL
Definition: AArch64BaseInfo.h:269
llvm::AttributeList::addFnAttribute
LLVM_NODISCARD AttributeList addFnAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Add a function attribute to the list.
Definition: Attributes.h:490
Optional.h
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1400
Metadata.h
IntrinsicInst.h
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:783
llvm::EscapeEnumerator
EscapeEnumerator - This is a little algorithm to find all escape points from a function so that "fina...
Definition: EscapeEnumerator.h:29
ThreadSanitizer.h
llvm::MemTransferInst
This class wraps the llvm.memcpy/memmove intrinsics.
Definition: IntrinsicInst.h:969
llvm::Function
Definition: Function.h:62
kNumberOfAccessSizes
static const size_t kNumberOfAccessSizes
Definition: AddressSanitizer.cpp:180
llvm::AtomicRMWInst::Xor
@ Xor
*p = old ^ v
Definition: Instructions.h:757
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1176
Statistic.h
CaptureTracking.h
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:736
llvm::IRBuilder<>
llvm::GlobalVariable
Definition: GlobalVariable.h:39
ValueTracking.h
Local.h
llvm::AtomicOrdering::SequentiallyConsistent
@ SequentiallyConsistent
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
FAM
FunctionAnalysisManager FAM
Definition: PassBuilderBindings.cpp:59
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::getLoadStoreType
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Definition: Instructions.h:5364
llvm::PreservedAnalyses::none
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:158
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
Module.h
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:358
llvm::AttributeList
Definition: Attributes.h:403
isAtomic
static bool isAtomic(Instruction *I)
Definition: ThreadSanitizer.cpp:530
llvm::Optional
Definition: APInt.h:33
llvm::FenceInst
An instruction for ordering other memory operations.
Definition: Instructions.h:449
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:893
initialize
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, ArrayRef< StringLiteral > StandardNames)
Initialize the set of available library functions based on the specified target triple.
Definition: TargetLibraryInfo.cpp:116
llvm::AtomicRMWInst::FIRST_BINOP
@ FIRST_BINOP
Definition: Instructions.h:773
HasCalls
@ HasCalls
Definition: AArch64InstrInfo.cpp:6536
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:241
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::AtomicOrdering::Monotonic
@ Monotonic
kTsanModuleCtorName
const char kTsanModuleCtorName[]
Definition: ThreadSanitizer.cpp:100
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
CommandLine.h
llvm::getOrCreateSanitizerCtorAndInitFunctions
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef())
Creates sanitizer constructor function lazily.
Definition: ModuleUtils.cpp:158
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
createOrdering
static ConstantInt * createOrdering(IRBuilder<> *IRB, AtomicOrdering ord)
Definition: ThreadSanitizer.cpp:724
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
SmallString.h
Intrinsics.h
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
MAM
ModuleAnalysisManager MAM
Definition: PassBuilderBindings.cpp:61
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1521
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::ReplaceInstWithInst
void ReplaceInstWithInst(BasicBlock::InstListType &BIL, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Definition: BasicBlockUtils.cpp:479
TargetLibraryInfo.h
false
Definition: StackSlotColoring.cpp:142
shouldInstrumentReadWriteFromAddress
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr)
Definition: ThreadSanitizer.cpp:409
llvm::ModuleThreadSanitizerPass::run
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: ThreadSanitizer.cpp:209
llvm::IntegerType
Class to represent integer types.
Definition: DerivedTypes.h:40
llvm::Instruction
Definition: Instruction.h:45
InstrProf.h
llvm::AtomicOrdering::Acquire
@ Acquire
llvm::Triple::getObjectFormat
ObjectFormatType getObjectFormat() const
Get the object format for this triple.
Definition: Triple.h:337
llvm::AtomicRMWInst::Nand
@ Nand
*p = ~(old & v)
Definition: Instructions.h:753
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1775
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:925
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Definition: ValueTracking.cpp:4280
ClInstrumentAtomics
static cl::opt< bool > ClInstrumentAtomics("tsan-instrument-atomics", cl::init(true), cl::desc("Instrument atomics"), cl::Hidden)
llvm::AtomicRMWInst::Xchg
@ Xchg
*p = v
Definition: Instructions.h:745
llvm::AtomicRMWInst::Add
@ Add
*p = old + v
Definition: Instructions.h:747
Type.h
llvm::MemSetInst
This class wraps the llvm.memset intrinsic.
Definition: IntrinsicInst.h:957
ClInstrumentReadBeforeWrite
static cl::opt< bool > ClInstrumentReadBeforeWrite("tsan-instrument-read-before-write", cl::init(false), cl::desc("Do not eliminate read instrumentation for read-before-writes"), cl::Hidden)
llvm::SmallString< 32 >
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
ClInstrumentMemIntrinsics
static cl::opt< bool > ClInstrumentMemIntrinsics("tsan-instrument-memintrinsics", cl::init(true), cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden)
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:190
llvm::AtomicOrdering
AtomicOrdering
Atomic ordering for LLVM's memory model.
Definition: AtomicOrdering.h:56
llvm::cl::opt< bool >
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:309
llvm::AtomicRMWInst::Sub
@ Sub
*p = old - v
Definition: Instructions.h:749
llvm::ThreadSanitizerPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
Definition: ThreadSanitizer.cpp:201
llvm::TargetLibraryInfoWrapperPass
Definition: TargetLibraryInfo.h:465
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:79
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::IRBuilderBase::getInt32
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:478
llvm::DenseMap
Definition: DenseMap.h:714
llvm::AtomicOrdering::Unordered
@ Unordered
I
#define I(x, y, z)
Definition: MD5.cpp:58
StringExtras.h
llvm::SyncScope::SingleThread
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition: LLVMContext.h:55
llvm::GetElementPtrInst
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::AtomicRMWInst::Or
@ Or
*p = old | v
Definition: Instructions.h:755
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI
StandardInstrumentations SI(Debug, VerifyEach)
ClCompoundReadBeforeWrite
static cl::opt< bool > ClCompoundReadBeforeWrite("tsan-compound-read-before-write", cl::init(false), cl::desc("Emit special compound instrumentation for reads-before-writes"), cl::Hidden)
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan", "ThreadSanitizer: detects data races.", false, false) INITIALIZE_PASS_END(ThreadSanitizerLegacyPass
ClInstrumentMemoryAccesses
static cl::opt< bool > ClInstrumentMemoryAccesses("tsan-instrument-memory-accesses", cl::init(true), cl::desc("Instrument memory accesses"), cl::Hidden)
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
llvm::MDNode
Metadata node.
Definition: Metadata.h:906
llvm::PointerMayBeCaptured
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Definition: CaptureTracking.cpp:215
DataLayout.h
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
ClInstrumentFuncEntryExit
static cl::opt< bool > ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true), cl::desc("Instrument function entry and exit"), cl::Hidden)
uint32_t
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::maybeMarkSanitizerLibraryCallNoBuiltin
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:3239
llvm::AMDGPU::HSAMD::Kernel::Arg::Key::IsVolatile
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
Definition: AMDGPUMetadata.h:194
ClHandleCxxExceptions
static cl::opt< bool > ClHandleCxxExceptions("tsan-handle-cxx-exceptions", cl::init(true), cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"), cl::Hidden)
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:287
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:180
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:731
llvm::pdb::PDB_DataKind::Local
@ Local
llvm::AtomicOrdering::Release
@ Release
llvm::MachO::All
@ All
Definition: InterfaceFile.h:69
runOnFunction
static bool runOnFunction(Function &F, bool PostInlining)
Definition: EntryExitInstrumenter.cpp:69
llvm::Type::getIntNTy
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition: Type.cpp:245
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:325
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:161
llvm::SectionName
Definition: DWARFSection.h:21
Success
#define Success
Definition: AArch64Disassembler.cpp:266
llvm::AtomicRMWInst::And
@ And
*p = old & v
Definition: Instructions.h:751
llvm::TypeSize
Definition: TypeSize.h:416
Function.h
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:776
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:221
ClDistinguishVolatile
static cl::opt< bool > ClDistinguishVolatile("tsan-distinguish-volatile", cl::init(false), cl::desc("Emit special instrumentation for accesses to volatiles"), cl::Hidden)
llvm::getInstrProfSectionName
std::string getInstrProfSectionName(InstrProfSectKind IPSK, Triple::ObjectFormatType OF, bool AddSegmentInfo=true)
Return the name of the profile section corresponding to IPSK.
Definition: InstrProf.cpp:213
Instructions.h
SmallVector.h
ModuleUtils.h
EscapeEnumerator.h
llvm::FunctionCallee
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:165
tsan
tsan
Definition: ThreadSanitizer.cpp:219
llvm::SmallVectorImpl< Instruction * >
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1478
llvm::AtomicRMWInst::LAST_BINOP
@ LAST_BINOP
Definition: Instructions.h:774
GEP
Hexagon Common GEP
Definition: HexagonCommonGEP.cpp:172
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
LLVMContext.h
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::appendToGlobalCtors
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:66
llvm::cl::desc
Definition: CommandLine.h:412
isVtableAccess
static bool isVtableAccess(Instruction *I)
Definition: ThreadSanitizer.cpp:401
raw_ostream.h
BasicBlockUtils.h
InitializePasses.h
kTsanInitName
const char kTsanInitName[]
Definition: ThreadSanitizer.cpp:101
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:526
llvm::initializeThreadSanitizerLegacyPassPass
void initializeThreadSanitizerLegacyPassPass(PassRegistry &)
Debug.h
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition: TargetLibraryInfo.h:440
llvm::AtomicOrdering::NotAtomic
@ NotAtomic
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38