LLVM  14.0.0git
ThreadSanitizer.cpp
Go to the documentation of this file.
1 //===-- ThreadSanitizer.cpp - race detector -------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer, a race detector.
10 //
11 // The tool is under development, for the details about previous versions see
12 // http://code.google.com/p/data-race-test
13 //
14 // The instrumentation phase is quite simple:
15 // - Insert calls to run-time library before every memory access.
16 // - Optimizations may apply to avoid instrumenting some of the accesses.
17 // - Insert calls at function entry/exit.
18 // The rest is handled by the run-time library.
19 //===----------------------------------------------------------------------===//
20 
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/Optional.h"
24 #include "llvm/ADT/SmallString.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/LLVMContext.h"
38 #include "llvm/IR/Metadata.h"
39 #include "llvm/IR/Module.h"
40 #include "llvm/IR/Type.h"
41 #include "llvm/InitializePasses.h"
44 #include "llvm/Support/Debug.h"
52 
53 using namespace llvm;
54 
55 #define DEBUG_TYPE "tsan"
56 
58  "tsan-instrument-memory-accesses", cl::init(true),
59  cl::desc("Instrument memory accesses"), cl::Hidden);
60 static cl::opt<bool>
61  ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
62  cl::desc("Instrument function entry and exit"),
63  cl::Hidden);
65  "tsan-handle-cxx-exceptions", cl::init(true),
66  cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
67  cl::Hidden);
68 static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
69  cl::init(true),
70  cl::desc("Instrument atomics"),
71  cl::Hidden);
73  "tsan-instrument-memintrinsics", cl::init(true),
74  cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
76  "tsan-distinguish-volatile", cl::init(false),
77  cl::desc("Emit special instrumentation for accesses to volatiles"),
78  cl::Hidden);
80  "tsan-instrument-read-before-write", cl::init(false),
81  cl::desc("Do not eliminate read instrumentation for read-before-writes"),
82  cl::Hidden);
84  "tsan-compound-read-before-write", cl::init(false),
85  cl::desc("Emit special compound instrumentation for reads-before-writes"),
86  cl::Hidden);
87 
88 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
89 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
90 STATISTIC(NumOmittedReadsBeforeWrite,
91  "Number of reads ignored due to following writes");
92 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
93 STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
94 STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
95 STATISTIC(NumOmittedReadsFromConstantGlobals,
96  "Number of reads from constant globals");
97 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
98 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
99 
100 const char kTsanModuleCtorName[] = "tsan.module_ctor";
101 const char kTsanInitName[] = "__tsan_init";
102 
103 namespace {
104 
105 /// ThreadSanitizer: instrument the code in module to find races.
106 ///
107 /// Instantiating ThreadSanitizer inserts the tsan runtime library API function
108 /// declarations into the module if they don't exist already. Instantiating
109 /// ensures the __tsan_init function is in the list of global constructors for
110 /// the module.
111 struct ThreadSanitizer {
112  ThreadSanitizer() {
113  // Sanity check options and warn user.
115  errs()
116  << "warning: Option -tsan-compound-read-before-write has no effect "
117  "when -tsan-instrument-read-before-write is set.\n";
118  }
119  }
120 
121  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);
122 
123 private:
124  // Internal Instruction wrapper that contains more information about the
125  // Instruction from prior analysis.
126  struct InstructionInfo {
127  // Instrumentation emitted for this instruction is for a compounded set of
128  // read and write operations in the same basic block.
129  static constexpr unsigned kCompoundRW = (1U << 0);
130 
131  explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}
132 
133  Instruction *Inst;
134  unsigned Flags = 0;
135  };
136 
137  void initialize(Module &M);
138  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
139  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
140  bool instrumentMemIntrinsic(Instruction *I);
141  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
143  const DataLayout &DL);
144  bool addrPointsToConstantData(Value *Addr);
145  int getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr, const DataLayout &DL);
146  void InsertRuntimeIgnores(Function &F);
147 
148  Type *IntptrTy;
149  FunctionCallee TsanFuncEntry;
150  FunctionCallee TsanFuncExit;
151  FunctionCallee TsanIgnoreBegin;
152  FunctionCallee TsanIgnoreEnd;
153  // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
154  static const size_t kNumberOfAccessSizes = 5;
157  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
158  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
159  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
160  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
161  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
162  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
163  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
164  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
165  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
166  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
167  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
169  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
170  FunctionCallee TsanAtomicThreadFence;
171  FunctionCallee TsanAtomicSignalFence;
172  FunctionCallee TsanVptrUpdate;
173  FunctionCallee TsanVptrLoad;
174  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
175 };
176 
177 struct ThreadSanitizerLegacyPass : FunctionPass {
178  ThreadSanitizerLegacyPass() : FunctionPass(ID) {
180  }
181  StringRef getPassName() const override;
182  void getAnalysisUsage(AnalysisUsage &AU) const override;
183  bool runOnFunction(Function &F) override;
184  bool doInitialization(Module &M) override;
185  static char ID; // Pass identification, replacement for typeid.
186 private:
188 };
189 
190 void insertModuleCtor(Module &M) {
192  M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
193  /*InitArgs=*/{},
194  // This callback is invoked when the functions are created the first
195  // time. Hook them into the global ctors list in that case:
196  [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
197 }
198 
199 } // namespace
200 
203  ThreadSanitizer TSan;
204  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
205  return PreservedAnalyses::none();
206  return PreservedAnalyses::all();
207 }
208 
211  insertModuleCtor(M);
212  return PreservedAnalyses::none();
213 }
214 
216 INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
217  "ThreadSanitizer: detects data races.", false, false)
219 INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
220  "ThreadSanitizer: detects data races.", false, false)
221 
222 StringRef ThreadSanitizerLegacyPass::getPassName() const {
223  return "ThreadSanitizerLegacyPass";
224 }
225 
226 void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
228 }
229 
230 bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
231  insertModuleCtor(M);
232  TSan.emplace();
233  return true;
234 }
235 
237  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
238  TSan->sanitizeFunction(F, TLI);
239  return true;
240 }
241 
243  return new ThreadSanitizerLegacyPass();
244 }
245 
247  const DataLayout &DL = M.getDataLayout();
248  IntptrTy = DL.getIntPtrType(M.getContext());
249 
250  IRBuilder<> IRB(M.getContext());
251  AttributeList Attr;
252  Attr = Attr.addFnAttribute(M.getContext(), Attribute::NoUnwind);
253  // Initialize the callbacks.
254  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
255  IRB.getVoidTy(), IRB.getInt8PtrTy());
256  TsanFuncExit =
257  M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
258  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
259  IRB.getVoidTy());
260  TsanIgnoreEnd =
261  M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
262  IntegerType *OrdTy = IRB.getInt32Ty();
263  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
264  const unsigned ByteSize = 1U << i;
265  const unsigned BitSize = ByteSize * 8;
266  std::string ByteSizeStr = utostr(ByteSize);
267  std::string BitSizeStr = utostr(BitSize);
268  SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
269  TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
270  IRB.getInt8PtrTy());
271 
272  SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
273  TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
274  IRB.getInt8PtrTy());
275 
276  SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
277  TsanUnalignedRead[i] = M.getOrInsertFunction(
278  UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
279 
280  SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
281  TsanUnalignedWrite[i] = M.getOrInsertFunction(
282  UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
283 
284  SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
285  TsanVolatileRead[i] = M.getOrInsertFunction(
286  VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
287 
288  SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
289  TsanVolatileWrite[i] = M.getOrInsertFunction(
290  VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
291 
292  SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
293  ByteSizeStr);
294  TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
295  UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
296 
297  SmallString<64> UnalignedVolatileWriteName(
298  "__tsan_unaligned_volatile_write" + ByteSizeStr);
299  TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
300  UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
301 
302  SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
303  TsanCompoundRW[i] = M.getOrInsertFunction(
304  CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
305 
306  SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
307  ByteSizeStr);
308  TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
309  UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
310 
311  Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
312  Type *PtrTy = Ty->getPointerTo();
313  SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
314  {
315  AttributeList AL = Attr;
316  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
317  TsanAtomicLoad[i] =
318  M.getOrInsertFunction(AtomicLoadName, AL, Ty, PtrTy, OrdTy);
319  }
320 
321  SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
322  {
323  AttributeList AL = Attr;
324  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
325  AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
326  TsanAtomicStore[i] = M.getOrInsertFunction(
327  AtomicStoreName, AL, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
328  }
329 
330  for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
332  TsanAtomicRMW[Op][i] = nullptr;
333  const char *NamePart = nullptr;
334  if (Op == AtomicRMWInst::Xchg)
335  NamePart = "_exchange";
336  else if (Op == AtomicRMWInst::Add)
337  NamePart = "_fetch_add";
338  else if (Op == AtomicRMWInst::Sub)
339  NamePart = "_fetch_sub";
340  else if (Op == AtomicRMWInst::And)
341  NamePart = "_fetch_and";
342  else if (Op == AtomicRMWInst::Or)
343  NamePart = "_fetch_or";
344  else if (Op == AtomicRMWInst::Xor)
345  NamePart = "_fetch_xor";
346  else if (Op == AtomicRMWInst::Nand)
347  NamePart = "_fetch_nand";
348  else
349  continue;
350  SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
351  {
352  AttributeList AL = Attr;
353  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
354  AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
355  TsanAtomicRMW[Op][i] =
356  M.getOrInsertFunction(RMWName, AL, Ty, PtrTy, Ty, OrdTy);
357  }
358  }
359 
360  SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
361  "_compare_exchange_val");
362  {
363  AttributeList AL = Attr;
364  AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
365  AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
366  AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
367  AL = AL.addParamAttribute(M.getContext(), 4, Attribute::ZExt);
368  TsanAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, AL, Ty, PtrTy, Ty,
369  Ty, OrdTy, OrdTy);
370  }
371  }
372  TsanVptrUpdate =
373  M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
374  IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
375  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
376  IRB.getVoidTy(), IRB.getInt8PtrTy());
377  {
378  AttributeList AL = Attr;
379  AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
380  TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
381  AL, IRB.getVoidTy(), OrdTy);
382  }
383  {
384  AttributeList AL = Attr;
385  AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
386  TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
387  AL, IRB.getVoidTy(), OrdTy);
388  }
389 
390  MemmoveFn =
391  M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
392  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
393  MemcpyFn =
394  M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
395  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
396  MemsetFn =
397  M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
398  IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
399 }
400 
401 static bool isVtableAccess(Instruction *I) {
402  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
403  return Tag->isTBAAVtableAccess();
404  return false;
405 }
406 
407 // Do not instrument known races/"benign races" that come from compiler
408 // instrumentatin. The user has no way of suppressing them.
410  // Peel off GEPs and BitCasts.
411  Addr = Addr->stripInBoundsOffsets();
412 
413  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
414  if (GV->hasSection()) {
415  StringRef SectionName = GV->getSection();
416  // Check if the global is in the PGO counters section.
417  auto OF = Triple(M->getTargetTriple()).getObjectFormat();
418  if (SectionName.endswith(
419  getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
420  return false;
421  }
422 
423  // Check if the global is private gcov data.
424  if (GV->getName().startswith("__llvm_gcov") ||
425  GV->getName().startswith("__llvm_gcda"))
426  return false;
427  }
428 
429  // Do not instrument acesses from different address spaces; we cannot deal
430  // with them.
431  if (Addr) {
432  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
433  if (PtrTy->getPointerAddressSpace() != 0)
434  return false;
435  }
436 
437  return true;
438 }
439 
440 bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
441  // If this is a GEP, just analyze its pointer operand.
442  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
443  Addr = GEP->getPointerOperand();
444 
445  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
446  if (GV->isConstant()) {
447  // Reads from constant globals can not race with any writes.
448  NumOmittedReadsFromConstantGlobals++;
449  return true;
450  }
451  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
452  if (isVtableAccess(L)) {
453  // Reads from a vtable pointer can not race with any writes.
454  NumOmittedReadsFromVtable++;
455  return true;
456  }
457  }
458  return false;
459 }
460 
461 // Instrumenting some of the accesses may be proven redundant.
462 // Currently handled:
463 // - read-before-write (within same BB, no calls between)
464 // - not captured variables
465 //
466 // We do not handle some of the patterns that should not survive
467 // after the classic compiler optimizations.
468 // E.g. two reads from the same temp should be eliminated by CSE,
469 // two writes should be eliminated by DSE, etc.
470 //
471 // 'Local' is a vector of insns within the same BB (no calls between).
472 // 'All' is a vector of insns that will be instrumented.
473 void ThreadSanitizer::chooseInstructionsToInstrument(
476  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
477  // Iterate from the end.
478  for (Instruction *I : reverse(Local)) {
479  const bool IsWrite = isa<StoreInst>(*I);
480  Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
481  : cast<LoadInst>(I)->getPointerOperand();
482 
483  if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
484  continue;
485 
486  if (!IsWrite) {
487  const auto WriteEntry = WriteTargets.find(Addr);
488  if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
489  auto &WI = All[WriteEntry->second];
490  // If we distinguish volatile accesses and if either the read or write
491  // is volatile, do not omit any instrumentation.
492  const bool AnyVolatile =
493  ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
494  cast<StoreInst>(WI.Inst)->isVolatile());
495  if (!AnyVolatile) {
496  // We will write to this temp, so no reason to analyze the read.
497  // Mark the write instruction as compound.
498  WI.Flags |= InstructionInfo::kCompoundRW;
499  NumOmittedReadsBeforeWrite++;
500  continue;
501  }
502  }
503 
504  if (addrPointsToConstantData(Addr)) {
505  // Addr points to some constant data -- it can not race with any writes.
506  continue;
507  }
508  }
509 
510  if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
511  !PointerMayBeCaptured(Addr, true, true)) {
512  // The variable is addressable but not captured, so it cannot be
513  // referenced from a different thread and participate in a data race
514  // (see llvm/Analysis/CaptureTracking.h for details).
515  NumOmittedNonCaptured++;
516  continue;
517  }
518 
519  // Instrument this instruction.
520  All.emplace_back(I);
521  if (IsWrite) {
522  // For read-before-write and compound instrumentation we only need one
523  // write target, and we can override any previous entry if it exists.
524  WriteTargets[Addr] = All.size() - 1;
525  }
526  }
527  Local.clear();
528 }
529 
530 static bool isAtomic(Instruction *I) {
531  // TODO: Ask TTI whether synchronization scope is between threads.
532  if (LoadInst *LI = dyn_cast<LoadInst>(I))
533  return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
534  if (StoreInst *SI = dyn_cast<StoreInst>(I))
535  return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
536  if (isa<AtomicRMWInst>(I))
537  return true;
538  if (isa<AtomicCmpXchgInst>(I))
539  return true;
540  if (isa<FenceInst>(I))
541  return true;
542  return false;
543 }
544 
545 void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
546  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
547  IRB.CreateCall(TsanIgnoreBegin);
548  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
549  while (IRBuilder<> *AtExit = EE.Next()) {
550  AtExit->CreateCall(TsanIgnoreEnd);
551  }
552 }
553 
554 bool ThreadSanitizer::sanitizeFunction(Function &F,
555  const TargetLibraryInfo &TLI) {
556  // This is required to prevent instrumenting call to __tsan_init from within
557  // the module constructor.
558  if (F.getName() == kTsanModuleCtorName)
559  return false;
560  // Naked functions can not have prologue/epilogue
561  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them at
562  // all.
563  if (F.hasFnAttribute(Attribute::Naked))
564  return false;
565 
566  // __attribute__(disable_sanitizer_instrumentation) prevents all kinds of
567  // instrumentation.
568  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
569  return false;
570 
571  initialize(*F.getParent());
572  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
573  SmallVector<Instruction*, 8> LocalLoadsAndStores;
574  SmallVector<Instruction*, 8> AtomicAccesses;
575  SmallVector<Instruction*, 8> MemIntrinCalls;
576  bool Res = false;
577  bool HasCalls = false;
578  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
579  const DataLayout &DL = F.getParent()->getDataLayout();
580 
581  // Traverse all instructions, collect loads/stores/returns, check for calls.
582  for (auto &BB : F) {
583  for (auto &Inst : BB) {
584  if (isAtomic(&Inst))
585  AtomicAccesses.push_back(&Inst);
586  else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
587  LocalLoadsAndStores.push_back(&Inst);
588  else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
589  if (CallInst *CI = dyn_cast<CallInst>(&Inst))
591  if (isa<MemIntrinsic>(Inst))
592  MemIntrinCalls.push_back(&Inst);
593  HasCalls = true;
594  chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
595  DL);
596  }
597  }
598  chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
599  }
600 
601  // We have collected all loads and stores.
602  // FIXME: many of these accesses do not need to be checked for races
603  // (e.g. variables that do not escape, etc).
604 
605  // Instrument memory accesses only if we want to report bugs in the function.
606  if (ClInstrumentMemoryAccesses && SanitizeFunction)
607  for (const auto &II : AllLoadsAndStores) {
608  Res |= instrumentLoadOrStore(II, DL);
609  }
610 
611  // Instrument atomic memory accesses in any case (they can be used to
612  // implement synchronization).
614  for (auto Inst : AtomicAccesses) {
615  Res |= instrumentAtomic(Inst, DL);
616  }
617 
618  if (ClInstrumentMemIntrinsics && SanitizeFunction)
619  for (auto Inst : MemIntrinCalls) {
620  Res |= instrumentMemIntrinsic(Inst);
621  }
622 
623  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
624  assert(!F.hasFnAttribute(Attribute::SanitizeThread));
625  if (HasCalls)
626  InsertRuntimeIgnores(F);
627  }
628 
629  // Instrument function entry/exit points if there were instrumented accesses.
630  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
631  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
632  Value *ReturnAddress = IRB.CreateCall(
633  Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
634  IRB.getInt32(0));
635  IRB.CreateCall(TsanFuncEntry, ReturnAddress);
636 
637  EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
638  while (IRBuilder<> *AtExit = EE.Next()) {
639  AtExit->CreateCall(TsanFuncExit, {});
640  }
641  Res = true;
642  }
643  return Res;
644 }
645 
646 bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
647  const DataLayout &DL) {
648  IRBuilder<> IRB(II.Inst);
649  const bool IsWrite = isa<StoreInst>(*II.Inst);
650  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
651  : cast<LoadInst>(II.Inst)->getPointerOperand();
652  Type *OrigTy = getLoadStoreType(II.Inst);
653 
654  // swifterror memory addresses are mem2reg promoted by instruction selection.
655  // As such they cannot have regular uses like an instrumentation function and
656  // it makes no sense to track them as memory.
657  if (Addr->isSwiftError())
658  return false;
659 
660  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
661  if (Idx < 0)
662  return false;
663  if (IsWrite && isVtableAccess(II.Inst)) {
664  LLVM_DEBUG(dbgs() << " VPTR : " << *II.Inst << "\n");
665  Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
666  // StoredValue may be a vector type if we are storing several vptrs at once.
667  // In this case, just take the first element of the vector since this is
668  // enough to find vptr races.
669  if (isa<VectorType>(StoredValue->getType()))
670  StoredValue = IRB.CreateExtractElement(
671  StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
672  if (StoredValue->getType()->isIntegerTy())
673  StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
674  // Call TsanVptrUpdate.
675  IRB.CreateCall(TsanVptrUpdate,
676  {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
677  IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
678  NumInstrumentedVtableWrites++;
679  return true;
680  }
681  if (!IsWrite && isVtableAccess(II.Inst)) {
682  IRB.CreateCall(TsanVptrLoad,
683  IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
684  NumInstrumentedVtableReads++;
685  return true;
686  }
687 
688  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
689  : cast<LoadInst>(II.Inst)->getAlignment();
690  const bool IsCompoundRW =
691  ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
692  const bool IsVolatile = ClDistinguishVolatile &&
693  (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
694  : cast<LoadInst>(II.Inst)->isVolatile());
695  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");
696 
697  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
698  FunctionCallee OnAccessFunc = nullptr;
699  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
700  if (IsCompoundRW)
701  OnAccessFunc = TsanCompoundRW[Idx];
702  else if (IsVolatile)
703  OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
704  else
705  OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
706  } else {
707  if (IsCompoundRW)
708  OnAccessFunc = TsanUnalignedCompoundRW[Idx];
709  else if (IsVolatile)
710  OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
711  : TsanUnalignedVolatileRead[Idx];
712  else
713  OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
714  }
715  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
716  if (IsCompoundRW || IsWrite)
717  NumInstrumentedWrites++;
718  if (IsCompoundRW || !IsWrite)
719  NumInstrumentedReads++;
720  return true;
721 }
722 
724  uint32_t v = 0;
725  switch (ord) {
727  llvm_unreachable("unexpected atomic ordering!");
729  case AtomicOrdering::Monotonic: v = 0; break;
730  // Not specified yet:
731  // case AtomicOrdering::Consume: v = 1; break;
732  case AtomicOrdering::Acquire: v = 2; break;
733  case AtomicOrdering::Release: v = 3; break;
734  case AtomicOrdering::AcquireRelease: v = 4; break;
735  case AtomicOrdering::SequentiallyConsistent: v = 5; break;
736  }
737  return IRB->getInt32(v);
738 }
739 
740 // If a memset intrinsic gets inlined by the code gen, we will miss races on it.
741 // So, we either need to ensure the intrinsic is not inlined, or instrument it.
742 // We do not instrument memset/memmove/memcpy intrinsics (too complicated),
743 // instead we simply replace them with regular function calls, which are then
744 // intercepted by the run-time.
745 // Since tsan is running after everyone else, the calls should not be
746 // replaced back with intrinsics. If that becomes wrong at some point,
747 // we will need to call e.g. __tsan_memset to avoid the intrinsics.
748 bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
749  IRBuilder<> IRB(I);
750  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
751  IRB.CreateCall(
752  MemsetFn,
753  {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
754  IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
755  IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
756  I->eraseFromParent();
757  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
758  IRB.CreateCall(
759  isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
760  {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
761  IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
762  IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
763  I->eraseFromParent();
764  }
765  return false;
766 }
767 
768 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
769 // standards. For background see C++11 standard. A slightly older, publicly
770 // available draft of the standard (not entirely up-to-date, but close enough
771 // for casual browsing) is available here:
772 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
773 // The following page contains more background information:
774 // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
775 
776 bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
777  IRBuilder<> IRB(I);
778  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
779  Value *Addr = LI->getPointerOperand();
780  Type *OrigTy = LI->getType();
781  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
782  if (Idx < 0)
783  return false;
784  const unsigned ByteSize = 1U << Idx;
785  const unsigned BitSize = ByteSize * 8;
786  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
787  Type *PtrTy = Ty->getPointerTo();
788  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
789  createOrdering(&IRB, LI->getOrdering())};
790  Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
791  Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
792  I->replaceAllUsesWith(Cast);
793  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
794  Value *Addr = SI->getPointerOperand();
795  int Idx =
796  getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
797  if (Idx < 0)
798  return false;
799  const unsigned ByteSize = 1U << Idx;
800  const unsigned BitSize = ByteSize * 8;
801  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
802  Type *PtrTy = Ty->getPointerTo();
803  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
804  IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
805  createOrdering(&IRB, SI->getOrdering())};
806  CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
808  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
809  Value *Addr = RMWI->getPointerOperand();
810  int Idx =
811  getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
812  if (Idx < 0)
813  return false;
814  FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
815  if (!F)
816  return false;
817  const unsigned ByteSize = 1U << Idx;
818  const unsigned BitSize = ByteSize * 8;
819  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
820  Type *PtrTy = Ty->getPointerTo();
821  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
822  IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
823  createOrdering(&IRB, RMWI->getOrdering())};
826  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
827  Value *Addr = CASI->getPointerOperand();
828  Type *OrigOldValTy = CASI->getNewValOperand()->getType();
829  int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
830  if (Idx < 0)
831  return false;
832  const unsigned ByteSize = 1U << Idx;
833  const unsigned BitSize = ByteSize * 8;
834  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
835  Type *PtrTy = Ty->getPointerTo();
836  Value *CmpOperand =
837  IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
838  Value *NewOperand =
839  IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
840  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
841  CmpOperand,
842  NewOperand,
843  createOrdering(&IRB, CASI->getSuccessOrdering()),
844  createOrdering(&IRB, CASI->getFailureOrdering())};
845  CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
846  Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
847  Value *OldVal = C;
848  if (Ty != OrigOldValTy) {
849  // The value is a pointer, so we need to cast the return value.
850  OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
851  }
852 
853  Value *Res =
854  IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
855  Res = IRB.CreateInsertValue(Res, Success, 1);
856 
857  I->replaceAllUsesWith(Res);
858  I->eraseFromParent();
859  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
860  Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
861  FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
862  ? TsanAtomicSignalFence
863  : TsanAtomicThreadFence;
866  }
867  return true;
868 }
869 
870 int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
871  const DataLayout &DL) {
872  assert(OrigTy->isSized());
873  assert(
874  cast<PointerType>(Addr->getType())->isOpaqueOrPointeeTypeMatches(OrigTy));
875  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
876  if (TypeSize != 8 && TypeSize != 16 &&
877  TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
878  NumAccessesWithBadSize++;
879  // Ignore all unusual sizes.
880  return -1;
881  }
882  size_t Idx = countTrailingZeros(TypeSize / 8);
884  return Idx;
885 }
i
i
Definition: README.txt:29
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
Instrumentation.h
llvm::createThreadSanitizerLegacyPassPass
FunctionPass * createThreadSanitizerLegacyPassPass()
Definition: ThreadSanitizer.cpp:242
llvm::AtomicOrdering::AcquireRelease
@ AcquireRelease
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:263
MathExtras.h
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::AArch64CC::AL
@ AL
Definition: AArch64BaseInfo.h:269
llvm::AttributeList::addFnAttribute
LLVM_NODISCARD AttributeList addFnAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Add a function attribute to the list.
Definition: Attributes.h:484
Optional.h
llvm::DataLayout
A parsed version of the target data layout string, together with methods for querying it.
Definition: DataLayout.h:113
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1379
Metadata.h
IntrinsicInst.h
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:783
llvm::EscapeEnumerator
EscapeEnumerator - This is a little algorithm to find all escape points from a function so that "finally"-style code can be inserted.
Definition: EscapeEnumerator.h:29
ThreadSanitizer.h
llvm::MemTransferInst
This class wraps the llvm.memcpy/memmove intrinsics.
Definition: IntrinsicInst.h:917
llvm::Function
Definition: Function.h:62
kNumberOfAccessSizes
static const size_t kNumberOfAccessSizes
Definition: AddressSanitizer.cpp:177
llvm::AtomicRMWInst::Xor
@ Xor
*p = old ^ v
Definition: Instructions.h:752
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
Statistic.h
CaptureTracking.h
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:734
llvm::IRBuilder<>
llvm::GlobalVariable
Definition: GlobalVariable.h:40
ValueTracking.h
Local.h
llvm::AtomicOrdering::SequentiallyConsistent
@ SequentiallyConsistent
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:45
FAM
FunctionAnalysisManager FAM
Definition: PassBuilderBindings.cpp:59
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::getLoadStoreType
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Definition: Instructions.h:5344
llvm::PreservedAnalyses::none
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:158
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
Module.h
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:333
llvm::AttributeList
Definition: Attributes.h:399
isAtomic
static bool isAtomic(Instruction *I)
Definition: ThreadSanitizer.cpp:530
llvm::Optional
Definition: APInt.h:33
llvm::FenceInst
An instruction for ordering other memory operations.
Definition: Instructions.h:444
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:893
initialize
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, ArrayRef< StringLiteral > StandardNames)
Initialize the set of available library functions based on the specified target triple.
Definition: TargetLibraryInfo.cpp:116
llvm::AtomicRMWInst::FIRST_BINOP
@ FIRST_BINOP
Definition: Instructions.h:768
HasCalls
@ HasCalls
Definition: AArch64InstrInfo.cpp:6343
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:241
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::AtomicOrdering::Monotonic
@ Monotonic
kTsanModuleCtorName
const char kTsanModuleCtorName[]
Definition: ThreadSanitizer.cpp:100
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
CommandLine.h
llvm::getOrCreateSanitizerCtorAndInitFunctions
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef())
Creates sanitizer constructor function lazily.
Definition: ModuleUtils.cpp:158
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
createOrdering
static ConstantInt * createOrdering(IRBuilder<> *IRB, AtomicOrdering ord)
Definition: ThreadSanitizer.cpp:723
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
SmallString.h
Intrinsics.h
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
MAM
ModuleAnalysisManager MAM
Definition: PassBuilderBindings.cpp:61
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1518
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::ReplaceInstWithInst
void ReplaceInstWithInst(BasicBlock::InstListType &BIL, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Definition: BasicBlockUtils.cpp:468
TargetLibraryInfo.h
false
Definition: StackSlotColoring.cpp:142
shouldInstrumentReadWriteFromAddress
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr)
Definition: ThreadSanitizer.cpp:409
llvm::ModuleThreadSanitizerPass::run
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: ThreadSanitizer.cpp:209
llvm::IntegerType
Class to represent integer types.
Definition: DerivedTypes.h:40
llvm::Instruction
Definition: Instruction.h:45
InstrProf.h
llvm::AtomicOrdering::Acquire
@ Acquire
llvm::Triple::getObjectFormat
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
Definition: Triple.h:340
llvm::AtomicRMWInst::Nand
@ Nand
*p = ~(old & v)
Definition: Instructions.h:748
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1796
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:925
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Definition: ValueTracking.cpp:4376
ClInstrumentAtomics
static cl::opt< bool > ClInstrumentAtomics("tsan-instrument-atomics", cl::init(true), cl::desc("Instrument atomics"), cl::Hidden)
llvm::AtomicRMWInst::Xchg
@ Xchg
*p = v
Definition: Instructions.h:740
llvm::AtomicRMWInst::Add
@ Add
*p = old + v
Definition: Instructions.h:742
Type.h
llvm::MemSetInst
This class wraps the llvm.memset intrinsic.
Definition: IntrinsicInst.h:905
ClInstrumentReadBeforeWrite
static cl::opt< bool > ClInstrumentReadBeforeWrite("tsan-instrument-read-before-write", cl::init(false), cl::desc("Do not eliminate read instrumentation for read-before-writes"), cl::Hidden)
llvm::SmallString< 32 >
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
ClInstrumentMemIntrinsics
static cl::opt< bool > ClInstrumentMemIntrinsics("tsan-instrument-memintrinsics", cl::init(true), cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden)
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:190
llvm::AtomicOrdering
AtomicOrdering
Atomic ordering for LLVM's memory model.
Definition: AtomicOrdering.h:56
llvm::cl::opt< bool >
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:304
llvm::AtomicRMWInst::Sub
@ Sub
*p = old - v
Definition: Instructions.h:744
llvm::ThreadSanitizerPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
Definition: ThreadSanitizer.cpp:201
llvm::TargetLibraryInfoWrapperPass
Definition: TargetLibraryInfo.h:465
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::IRBuilderBase::getInt32
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:478
llvm::DenseMap
Definition: DenseMap.h:714
llvm::AtomicOrdering::Unordered
@ Unordered
I
#define I(x, y, z)
Definition: MD5.cpp:59
StringExtras.h
llvm::GetElementPtrInst
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:928
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::AtomicRMWInst::Or
@ Or
*p = old | v
Definition: Instructions.h:750
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI
StandardInstrumentations SI(Debug, VerifyEach)
ClCompoundReadBeforeWrite
static cl::opt< bool > ClCompoundReadBeforeWrite("tsan-compound-read-before-write", cl::init(false), cl::desc("Emit special compound instrumentation for reads-before-writes"), cl::Hidden)
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan", "ThreadSanitizer: detects data races.", false, false) INITIALIZE_PASS_END(ThreadSanitizerLegacyPass
ClInstrumentMemoryAccesses
static cl::opt< bool > ClInstrumentMemoryAccesses("tsan-instrument-memory-accesses", cl::init(true), cl::desc("Instrument memory accesses"), cl::Hidden)
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
llvm::MDNode
Metadata node.
Definition: Metadata.h:906
llvm::PointerMayBeCaptured
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Definition: CaptureTracking.cpp:215
DataLayout.h
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
ClInstrumentFuncEntryExit
static cl::opt< bool > ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true), cl::desc("Instrument function entry and exit"), cl::Hidden)
uint32_t
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::maybeMarkSanitizerLibraryCallNoBuiltin
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:3225
llvm::AMDGPU::HSAMD::Kernel::Arg::Key::IsVolatile
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
Definition: AMDGPUMetadata.h:194
ClHandleCxxExceptions
static cl::opt< bool > ClHandleCxxExceptions("tsan-handle-cxx-exceptions", cl::init(true), cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"), cl::Hidden)
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:286
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:175
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:726
llvm::pdb::PDB_DataKind::Local
@ Local
llvm::AtomicOrdering::Release
@ Release
llvm::MachO::All
@ All
Definition: InterfaceFile.h:73
runOnFunction
static bool runOnFunction(Function &F, bool PostInlining)
Definition: EntryExitInstrumenter.cpp:69
llvm::Type::getIntNTy
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition: Type.cpp:245
llvm::SyncScope::SingleThread
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition: LLVMContext.h:55
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:324
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:161
llvm::SectionName
Definition: DWARFSection.h:21
Success
#define Success
Definition: AArch64Disassembler.cpp:260
llvm::AtomicRMWInst::And
@ And
*p = old & v
Definition: Instructions.h:746
llvm::TypeSize
Definition: TypeSize.h:417
Function.h
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:776
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:221
ClDistinguishVolatile
static cl::opt< bool > ClDistinguishVolatile("tsan-distinguish-volatile", cl::init(false), cl::desc("Emit special instrumentation for accesses to volatiles"), cl::Hidden)
llvm::getInstrProfSectionName
std::string getInstrProfSectionName(InstrProfSectKind IPSK, Triple::ObjectFormatType OF, bool AddSegmentInfo=true)
Return the name of the profile section corresponding to IPSK.
Definition: InstrProf.cpp:175
Instructions.h
SmallVector.h
ModuleUtils.h
EscapeEnumerator.h
llvm::FunctionCallee
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single entity.
Definition: DerivedTypes.h:165
tsan
tsan
Definition: ThreadSanitizer.cpp:219
llvm::SmallVectorImpl< Instruction * >
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1475
llvm::AtomicRMWInst::LAST_BINOP
@ LAST_BINOP
Definition: Instructions.h:769
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
GEP
Hexagon Common GEP
Definition: HexagonCommonGEP.cpp:172
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
LLVMContext.h
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::appendToGlobalCtors
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:66
llvm::cl::desc
Definition: CommandLine.h:412
isVtableAccess
static bool isVtableAccess(Instruction *I)
Definition: ThreadSanitizer.cpp:401
raw_ostream.h
BasicBlockUtils.h
InitializePasses.h
kTsanInitName
const char kTsanInitName[]
Definition: ThreadSanitizer.cpp:101
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:521
llvm::initializeThreadSanitizerLegacyPassPass
void initializeThreadSanitizerLegacyPassPass(PassRegistry &)
Debug.h
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition: TargetLibraryInfo.h:440
llvm::AtomicOrdering::NotAtomic
@ NotAtomic
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37