#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
                                         cl::init(true),
                                         cl::desc("Instrument atomics"),
                                         cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
static cl::opt<bool> ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::init(false),
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentReadBeforeWrite(
    "tsan-instrument-read-before-write", cl::init(false),
    cl::desc("Do not eliminate read instrumentation for read-before-writes"),
    cl::Hidden);
static cl::opt<bool> ClCompoundReadBeforeWrite(
    "tsan-compound-read-before-write", cl::init(false),
    cl::desc("Emit special compound instrumentation for reads-before-writes"),
    cl::Hidden);
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
/// ThreadSanitizer: instrument the code in a module to find data races.
struct ThreadSanitizer {
  ThreadSanitizer() {
    // Check options and warn user.
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
    }
  }

  // Internal Instruction wrapper that contains more information about the
  // Instruction from prior analysis.
  struct InstructionInfo {
    // Instrumentation emitted for this instruction is for a compounded set of
    // read and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };

  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);

  // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
};
void insertModuleCtor(Module &M) {
  // Creates the tsan.module_ctor constructor, which calls __tsan_init, and
  // appends it to the module's global ctors (body elided in this excerpt).
}

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
  const DataLayout &DL = M.getDataLayout();
  LLVMContext &Ctx = M.getContext();
  IntptrTy = DL.getIntPtrType(Ctx);

  IRBuilder<> IRB(Ctx);
  AttributeList Attr;
  Attr = Attr.addFnAttribute(Ctx, Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());
    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());
    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    Type *Ty = Type::getIntNTy(Ctx, BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName,
                              TLI.getAttrList(&Ctx, {1}, /*Signed=*/true,
                                              /*Ret=*/BitSize <= 32, Attr),
                              Ty, PtrTy, OrdTy);

    // Args of type Ty need extension only when BitSize is 32 or less.
    using Idxs = std::vector<unsigned>;
    Idxs Idxs2Or12((BitSize <= 32) ? Idxs({1, 2}) : Idxs({2}));
    Idxs Idxs34Or1234((BitSize <= 32) ? Idxs({1, 2, 3, 4}) : Idxs({3, 4}));
    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName,
        TLI.getAttrList(&Ctx, Idxs2Or12, /*Signed=*/true, /*Ret=*/false, Attr),
        IRB.getVoidTy(), PtrTy, Ty, OrdTy);
    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[Op][i] = M.getOrInsertFunction(
          RMWName,
          TLI.getAttrList(&Ctx, Idxs2Or12, /*Signed=*/true,
                          /*Ret=*/BitSize <= 32, Attr),
          Ty, PtrTy, Ty, OrdTy);
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = M.getOrInsertFunction(
        AtomicCASName,
        TLI.getAttrList(&Ctx, Idxs34Or1234, /*Signed=*/true,
                        /*Ret=*/BitSize <= 32, Attr),
        Ty, PtrTy, Ty, Ty, OrdTy, OrdTy);
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction(
      "__tsan_atomic_thread_fence",
      TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getVoidTy(), OrdTy);
  TsanAtomicSignalFence = M.getOrInsertFunction(
      "__tsan_atomic_signal_fence",
      TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getVoidTy(), OrdTy);

  MemmoveFn =
      M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn = M.getOrInsertFunction(
      "__tsan_memset",
      TLI.getAttrList(&Ctx, {1}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}
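
// Taken together, initialize() declares every callback the instrumentation
// below can emit. The runtime ABI is size-suffixed: a plain 4-byte load maps
// to __tsan_read4, an unaligned 8-byte store to __tsan_unaligned_write8, and a
// 32-bit atomic load to __tsan_atomic32_load. All of these symbols are
// provided by the TSan runtime in compiler-rt and resolved at link time.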
static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      // ... (skip accesses to profile counter sections)
    }
    // Check if the global is private gcov/gcda data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }

  // Do not instrument accesses from different address spaces.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }
  return true;
}
bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local,
    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
  // Iterate from the end, so a read that follows a write to the same address
  // within the block is seen after the write.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();

    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
      continue;

    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and if either the read or write
        // is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // The write will be instrumented anyway; mark it as compound and
          // drop the separate read instrumentation.
          WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }

      if (addrPointsToConstantData(Addr))
        continue; // Constant data can not race with any writes.
    }

    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // Addressable but not captured, so it cannot be referenced from another
      // thread and participate in a data race.
      NumOmittedNonCaptured++;
      continue;
    }

    All.emplace_back(I);
    if (IsWrite)
      WriteTargets[Addr] = All.size() - 1;
  }
  Local.clear();
}
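
// Example of the read-before-write elision: for an increment of a global such
// as "g++", the frontend emits a load of g followed by a store to g in the
// same basic block. The load's instrumentation is dropped (counted in
// NumOmittedReadsBeforeWrite); with -tsan-compound-read-before-write the
// surviving store is flagged kCompoundRW and later lowered to a single
// __tsan_read_write call instead of a plain __tsan_write call.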
static bool isTsanAtomic(const Instruction *I) {
  auto SSID = getAtomicSyncScopeID(I);
  if (!SSID)
    return false;
  if (isa<LoadInst>(I) || isa<StoreInst>(I))
    return *SSID != SyncScope::SingleThread;
  return true;
}
void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  InstrumentationIRBuilder IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    InstrumentationIRBuilder::ensureDebugInfo(*AtExit, F);
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}
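
// Functions carrying the "sanitize_thread_no_checking_at_run_time" attribute
// are bracketed this way: every access executed between
// __tsan_ignore_thread_begin and __tsan_ignore_thread_end, including accesses
// performed inside callees, is ignored by the runtime rather than being
// skipped at compile time.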
bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions can not have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  // __attribute__(disable_sanitizer_instrumentation) prevents all kinds of
  // instrumentation.
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  initialize(*F.getParent(), TLI);
  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
  SmallVector<Instruction *, 8> LocalLoadsAndStores;
  SmallVector<Instruction *, 8> AtomicAccesses;
  SmallVector<Instruction *, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/atomics, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isTsanAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if ((isa<CallInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst)) ||
               isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(II, DL);
    }

  // Instrument atomic memory accesses in any function (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto *Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto *Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    InstrumentationIRBuilder IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      InstrumentationIRBuilder::ensureDebugInfo(*AtExit, F);
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}
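
// For illustration only (a sketch, not IR produced verbatim by the code
// above): a sanitize_thread function with one instrumented 4-byte store ends
// up looking roughly like
//   %ra = call ptr @llvm.returnaddress(i32 0)
//   call void @__tsan_func_entry(ptr %ra)
//   call void @__tsan_write4(ptr %p)
//   store i32 %v, ptr %p
//   call void @__tsan_func_exit()
//   ret void
// with the __tsan_func_exit call replicated on every exit path found by
// EscapeEnumerator (including exception cleanups when
// -tsan-handle-cxx-exceptions is on).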
bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  InstrumentationIRBuilder IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
  Type *OrigTy = getLoadStoreType(II.Inst);

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at
    // once. In this case just take the first element of the vector since this
    // is enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }

  const Align Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlign()
                                  : cast<LoadInst>(II.Inst)->getAlign();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  if (Alignment >= Align(8) || (Alignment.value() % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}
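
// The callback is thus selected along two axes: alignment (naturally aligned
// or >= 8-byte aligned accesses use the plain entry points, everything else
// the __tsan_unaligned_* variants) and access kind (plain, volatile, or
// compound read-write). For example, a 2-byte volatile store with alignment 1
// becomes a call to __tsan_unaligned_volatile_write2(addr).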
// Instrumenting memset/memcpy/memmove intrinsics directly is too complicated;
// instead they are replaced with calls to the __tsan_* runtime equivalents,
// which the run-time library intercepts.
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  InstrumentationIRBuilder IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}
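
// Concretely, a call such as
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 16, i1 false)
// is rewritten into
//   call ptr @__tsan_memcpy(ptr %dst, ptr %src, i64 16)
// so the runtime interceptor observes the copy and can report racing accesses
// to either buffer.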
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  InstrumentationIRBuilder IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    Type *OrigTy = LI->getType();
    int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
        IRB.CreateInsertValue(PoisonValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}
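
// As an example of the rewrite (illustrative IR, not emitted verbatim here):
//   %v = load atomic i32, ptr %p acquire, align 4
// becomes
//   %v = call i32 @__tsan_atomic32_load(ptr %p, i32 <acquire>)
// where the last operand is the constant produced by createOrdering() that
// encodes the C/C++ memory order for the runtime.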
int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
                                              const DataLayout &DL) {
  assert(OrigTy->isSized());
  assert(
      cast<PointerType>(Addr->getType())->isOpaqueOrPointeeTypeMatches(OrigTy));
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 && TypeSize != 64 &&
      TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countr_zero(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}
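
// Worked example: an i32 access has TypeSize 32, so TypeSize / 8 == 4 and
// countr_zero(4) == 2; index 2 selects the 4-byte callbacks such as
// __tsan_read4 / __tsan_write4. A 3-byte (24-bit) access has no matching
// callback, bumps NumAccessesWithBadSize, and is left uninstrumented.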