94#define DEBUG_TYPE "asan"
100 std::numeric_limits<uint64_t>::max();
141 "__asan_unregister_image_globals";
154 "__asan_stack_malloc_always_";
168 "__asan_option_detect_stack_use_after_return";
171 "__asan_shadow_memory_dynamic_address";
197 "asan-kernel",
cl::desc(
"Enable KernelAddressSanitizer instrumentation"),
202 cl::desc(
"Enable recovery mode (continue-after-error)."),
206 "asan-guard-against-version-mismatch",
212 cl::desc(
"instrument read instructions"),
216 "asan-instrument-writes",
cl::desc(
"instrument write instructions"),
225 "asan-instrument-atomics",
235 "asan-always-slow-path",
240 "asan-force-dynamic-shadow",
241 cl::desc(
"Load shadow address into a local variable for each function"),
246 cl::desc(
"Access dynamic shadow through an ifunc global on "
247 "platforms that support this"),
251 "asan-with-ifunc-suppress-remat",
252 cl::desc(
"Suppress rematerialization of dynamic shadow address by passing "
253 "it through inline asm in prologue."),
261 "asan-max-ins-per-bb",
cl::init(10000),
262 cl::desc(
"maximal number of instructions to instrument in any given BB"),
269 "asan-max-inline-poisoning-size",
271 "Inline shadow poisoning for blocks up to the given size in bytes."),
275 "asan-use-after-return",
276 cl::desc(
"Sets the mode of detection for stack-use-after-return."),
278 clEnumValN(AsanDetectStackUseAfterReturnMode::Never,
"never",
279 "Never detect stack use after return."),
281 AsanDetectStackUseAfterReturnMode::Runtime,
"runtime",
282 "Detect stack use after return if "
283 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284 clEnumValN(AsanDetectStackUseAfterReturnMode::Always,
"always",
285 "Always detect stack use after return.")),
289 cl::desc(
"Create redzones for byval "
290 "arguments (extra copy "
295 cl::desc(
"Check stack-use-after-scope"),
304 cl::desc(
"Handle C++ initializer order"),
308 "asan-detect-invalid-pointer-pair",
313 "asan-detect-invalid-pointer-cmp",
318 "asan-detect-invalid-pointer-sub",
323 "asan-realign-stack",
324 cl::desc(
"Realign stack to the value of this flag (power of two)"),
328 "asan-instrumentation-with-call-threshold",
329 cl::desc(
"If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
335 "asan-memory-access-callback-prefix",
340 "asan-kernel-mem-intrinsic-prefix",
346 cl::desc(
"instrument dynamic allocas"),
350 "asan-skip-promotable-allocas",
355 "asan-constructor-kind",
356 cl::desc(
"Sets the ASan constructor kind"),
359 "Use global constructors")),
366 cl::desc(
"scale of asan shadow mapping"),
371 cl::desc(
"offset of asan shadow mapping [EXPERIMENTAL]"),
385 "asan-opt-same-temp",
cl::desc(
"Instrument the same temp just once"),
389 cl::desc(
"Don't instrument scalar globals"),
393 "asan-opt-stack",
cl::desc(
"Don't instrument scalar stack variables"),
397 "asan-stack-dynamic-alloca",
402 "asan-force-experiment",
408 cl::desc(
"Use private aliases for global variables"),
413 cl::desc(
"Use odr indicators to improve ODR reporting"),
418 cl::desc(
"Use linker features to support dead "
419 "code stripping of globals"),
426 cl::desc(
"Place ASan constructors in comdat sections"),
430 "asan-destructor-kind",
431 cl::desc(
"Sets the ASan destructor kind. The default is to use the value "
432 "provided to the pass constructor"),
435 "Use global destructors")),
455STATISTIC(NumInstrumentedReads,
"Number of instrumented reads");
456STATISTIC(NumInstrumentedWrites,
"Number of instrumented writes");
458 "Number of optimized accesses to global vars");
460 "Number of optimized accesses to stack vars");
469struct ShadowMapping {
480 bool IsAndroid = TargetTriple.
isAndroid();
483 bool IsMacOS = TargetTriple.
isMacOSX();
486 bool IsPS = TargetTriple.
isPS();
493 bool IsMIPS32 = TargetTriple.
isMIPS32();
494 bool IsMIPS64 = TargetTriple.
isMIPS64();
495 bool IsArmOrThumb = TargetTriple.
isARM() || TargetTriple.
isThumb();
503 bool IsAMDGPU = TargetTriple.
isAMDGPU();
505 ShadowMapping Mapping;
512 if (LongSize == 32) {
515 else if (IsMIPSN32ABI)
527 else if (IsEmscripten)
540 else if (IsFreeBSD && IsAArch64)
542 else if (IsFreeBSD && !IsMIPS64) {
547 }
else if (IsNetBSD) {
554 else if (IsLinux && IsX86_64) {
560 }
else if (IsWindows && IsX86_64) {
566 else if (IsMacOS && IsAArch64)
570 else if (IsLoongArch64)
594 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
595 !IsRISCV64 && !IsLoongArch64 &&
596 !(Mapping.Offset & (Mapping.Offset - 1)) &&
598 bool IsAndroidWithIfuncSupport =
600 Mapping.InGlobal =
ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
608 int *MappingScale,
bool *OrShadowOffset) {
610 *ShadowBase = Mapping.Offset;
611 *MappingScale = Mapping.Scale;
612 *OrShadowOffset = Mapping.OrShadowOffset;
622 uint8_t AccessSizeIndex)
626 AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
627 CompileKernel(CompileKernel) {}
634 return std::max(32U, 1U << MappingScale);
653class RuntimeCallInserter {
655 bool TrackInsertedCalls =
false;
659 RuntimeCallInserter(
Function &Fn) : OwnerFn(&Fn) {
663 TrackInsertedCalls =
true;
667 ~RuntimeCallInserter() {
668 if (InsertedCalls.
empty())
670 assert(TrackInsertedCalls &&
"Calls were wrongly tracked");
673 for (
CallInst *CI : InsertedCalls) {
675 assert(BB &&
"Instruction doesn't belong to a BasicBlock");
677 "Instruction doesn't belong to the expected Function!");
685 if (Colors.
size() != 1) {
687 "Instruction's BasicBlock is not monochromatic");
694 if (EHPad && EHPad->
isEHPad()) {
698 OB, CI->getIterator());
699 NewCall->copyMetadata(*CI);
700 CI->replaceAllUsesWith(NewCall);
701 CI->eraseFromParent();
712 if (TrackInsertedCalls)
713 InsertedCalls.push_back(Inst);
719struct AddressSanitizer {
721 int InstrumentationWithCallsThreshold,
722 uint32_t MaxInlinePoisoningSize,
bool CompileKernel =
false,
723 bool Recover =
false,
bool UseAfterScope =
false,
725 AsanDetectStackUseAfterReturnMode::Runtime)
734 InstrumentationWithCallsThreshold(
737 : InstrumentationWithCallsThreshold),
740 : MaxInlinePoisoningSize) {
741 C = &(
M.getContext());
742 DL = &
M.getDataLayout();
743 LongSize =
M.getDataLayout().getPointerSizeInBits();
745 PtrTy = PointerType::getUnqual(*C);
747 TargetTriple =
Triple(
M.getTargetTriple());
751 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
759 bool isInterestingAlloca(
const AllocaInst &AI);
767 const DataLayout &DL, RuntimeCallInserter &RTCI);
768 void instrumentPointerComparisonOrSubtraction(
Instruction *
I,
769 RuntimeCallInserter &RTCI);
772 uint32_t TypeStoreSize,
bool IsWrite,
774 RuntimeCallInserter &RTCI);
777 uint32_t TypeStoreSize,
bool IsWrite,
778 Value *SizeArgument);
783 TypeSize TypeStoreSize,
bool IsWrite,
784 Value *SizeArgument,
bool UseCalls,
786 RuntimeCallInserter &RTCI);
787 void instrumentMaskedLoadOrStore(AddressSanitizer *
Pass,
const DataLayout &DL,
791 Type *OpType,
bool IsWrite,
792 Value *SizeArgument,
bool UseCalls,
793 uint32_t Exp, RuntimeCallInserter &RTCI);
797 bool IsWrite,
size_t AccessSizeIndex,
799 RuntimeCallInserter &RTCI);
800 void instrumentMemIntrinsic(
MemIntrinsic *
MI, RuntimeCallInserter &RTCI);
802 bool suppressInstrumentationSiteForDebug(
int &Instrumented);
804 bool maybeInsertAsanInitAtFunctionEntry(
Function &
F);
805 bool maybeInsertDynamicShadowAtFunctionEntry(
Function &
F);
806 void markEscapedLocalAllocas(
Function &
F);
809 friend struct FunctionStackPoisoner;
819 struct FunctionStateRAII {
820 AddressSanitizer *
Pass;
822 FunctionStateRAII(AddressSanitizer *
Pass) :
Pass(
Pass) {
824 "last pass forgot to clear cache");
828 ~FunctionStateRAII() {
829 Pass->LocalDynamicShadow =
nullptr;
830 Pass->ProcessedAllocas.clear();
846 ShadowMapping Mapping;
860 Value *LocalDynamicShadow =
nullptr;
866 int InstrumentationWithCallsThreshold;
870class ModuleAddressSanitizer {
872 ModuleAddressSanitizer(
Module &M,
bool InsertVersionCheck,
873 bool CompileKernel =
false,
bool Recover =
false,
874 bool UseGlobalsGC =
true,
bool UseOdrIndicator =
true,
882 : InsertVersionCheck),
884 UseGlobalsGC(UseGlobalsGC &&
ClUseGlobalsGC && !this->CompileKernel),
899 UseCtorComdat(UseGlobalsGC &&
ClWithComdat && !this->CompileKernel),
900 DestructorKind(DestructorKind),
904 C = &(
M.getContext());
905 int LongSize =
M.getDataLayout().getPointerSizeInBits();
907 PtrTy = PointerType::getUnqual(*C);
908 TargetTriple =
Triple(
M.getTargetTriple());
913 assert(this->DestructorKind != AsanDtorKind::Invalid);
916 bool instrumentModule();
919 void initializeCallbacks();
921 void instrumentGlobals(
IRBuilder<> &IRB,
bool *CtorComdat);
928 const std::string &UniqueModuleId);
933 InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB,
945 bool ShouldUseMachOGlobalsSection()
const;
946 StringRef getGlobalMetadataSection()
const;
947 void poisonOneInitializer(
Function &GlobalInit);
948 void createInitializerPoisonCalls();
949 uint64_t getMinRedzoneSizeForGlobal()
const {
953 int GetAsanVersion()
const;
958 bool InsertVersionCheck;
961 bool UsePrivateAlias;
962 bool UseOdrIndicator;
970 ShadowMapping Mapping;
980 Function *AsanCtorFunction =
nullptr;
981 Function *AsanDtorFunction =
nullptr;
994struct FunctionStackPoisoner :
public InstVisitor<FunctionStackPoisoner> {
996 AddressSanitizer &ASan;
997 RuntimeCallInserter &RTCI;
1002 ShadowMapping Mapping;
1011 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1015 struct AllocaPoisonCall {
1023 bool HasUntracedLifetimeIntrinsic =
false;
1030 bool HasInlineAsm =
false;
1031 bool HasReturnsTwiceCall =
false;
1034 FunctionStackPoisoner(
Function &F, AddressSanitizer &ASan,
1035 RuntimeCallInserter &RTCI)
1036 :
F(
F), ASan(ASan), RTCI(RTCI),
1038 IntptrTy(ASan.IntptrTy), IntptrPtrTy(
PointerType::
get(IntptrTy, 0)),
1039 Mapping(ASan.Mapping),
1048 copyArgsPassedByValToAllocas();
1053 if (AllocaVec.
empty() && DynamicAllocaVec.
empty())
return false;
1055 initializeCallbacks(*
F.getParent());
1057 if (HasUntracedLifetimeIntrinsic) {
1061 StaticAllocaPoisonCallVec.
clear();
1062 DynamicAllocaPoisonCallVec.
clear();
1065 processDynamicAllocas();
1066 processStaticAllocas();
1077 void copyArgsPassedByValToAllocas();
1082 void processStaticAllocas();
1083 void processDynamicAllocas();
1085 void createDynamicAllocasInitStorage();
1103 void unpoisonDynamicAllocasBeforeInst(
Instruction *InstBefore,
1104 Value *SavedStack) {
1111 if (!isa<ReturnInst>(InstBefore)) {
1113 InstBefore->
getModule(), Intrinsic::get_dynamic_area_offset,
1122 RTCI.createRuntimeCall(
1123 IRB, AsanAllocasUnpoisonFunc,
1124 {IRB.
CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1128 void unpoisonDynamicAllocas() {
1130 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1132 for (
Instruction *StackRestoreInst : StackRestoreVec)
1133 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1134 StackRestoreInst->getOperand(0));
1147 void handleDynamicAllocaCall(
AllocaInst *AI);
1153 const auto *STy = dyn_cast<StructType>(AllocaType);
1154 if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1155 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1159 if (AllocaVec.
empty())
1177 if (
ID == Intrinsic::stackrestore) StackRestoreVec.
push_back(&
II);
1178 if (
ID == Intrinsic::localescape) LocalEscapeCall = &
II;
1179 if (!ASan.UseAfterScope)
1181 if (!
II.isLifetimeStartOrEnd())
1184 auto *
Size = cast<ConstantInt>(
II.getArgOperand(0));
1186 if (
Size->isMinusOne())
return;
1189 const uint64_t SizeValue =
Size->getValue().getLimitedValue();
1190 if (SizeValue == ~0ULL ||
1198 HasUntracedLifetimeIntrinsic =
true;
1202 if (!ASan.isInterestingAlloca(*AI))
1204 bool DoPoison = (
ID == Intrinsic::lifetime_end);
1205 AllocaPoisonCall APC = {&
II, AI, SizeValue, DoPoison};
1207 StaticAllocaPoisonCallVec.
push_back(APC);
1209 DynamicAllocaPoisonCallVec.
push_back(APC);
1213 if (
CallInst *CI = dyn_cast<CallInst>(&CB)) {
1214 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1215 HasReturnsTwiceCall |= CI->canReturnTwice();
1220 void initializeCallbacks(
Module &M);
1247 OS, MapClassName2PassName);
1259 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1260 ConstructorKind(ConstructorKind) {}
1269 ModuleAddressSanitizer ModuleSanitizer(
1271 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1277 AddressSanitizer FunctionSanitizer(
1282 Modified |= FunctionSanitizer.instrumentFunction(
F, &TLI);
1284 Modified |= ModuleSanitizer.instrumentModule();
1305 if (
G->getName().starts_with(
"llvm.") ||
1307 G->getName().starts_with(
"__llvm_gcov_ctr") ||
1309 G->getName().starts_with(
"__llvm_rtti_proxy"))
1322 Type *PtrTy = cast<PointerType>(
Addr->getType()->getScalarType());
1324 if (AddrSpace == 3 || AddrSpace == 5)
1331 Shadow = IRB.
CreateLShr(Shadow, Mapping.Scale);
1332 if (Mapping.Offset == 0)
return Shadow;
1335 if (LocalDynamicShadow)
1336 ShadowBase = LocalDynamicShadow;
1338 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1339 if (Mapping.OrShadowOffset)
1340 return IRB.
CreateOr(Shadow, ShadowBase);
1342 return IRB.
CreateAdd(Shadow, ShadowBase);
1347 RuntimeCallInserter &RTCI) {
1349 if (isa<MemTransferInst>(
MI)) {
1350 RTCI.createRuntimeCall(
1351 IRB, isa<MemMoveInst>(
MI) ? AsanMemmove : AsanMemcpy,
1355 }
else if (isa<MemSetInst>(
MI)) {
1356 RTCI.createRuntimeCall(
1362 MI->eraseFromParent();
1366bool AddressSanitizer::isInterestingAlloca(
const AllocaInst &AI) {
1367 auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
1369 if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
1370 return PreviouslySeenAllocaInfo->getSecond();
1372 bool IsInteresting =
1385 !(SSGI && SSGI->
isSafe(AI)));
1387 ProcessedAllocas[&AI] = IsInteresting;
1388 return IsInteresting;
1393 Type *PtrTy = cast<PointerType>(
Ptr->getType()->getScalarType());
1402 if (
Ptr->isSwiftError())
1408 if (
auto AI = dyn_cast_or_null<AllocaInst>(
Ptr))
1419void AddressSanitizer::getInterestingMemoryOperands(
1422 if (LocalDynamicShadow ==
I)
1425 if (
LoadInst *LI = dyn_cast<LoadInst>(
I)) {
1428 Interesting.
emplace_back(
I, LI->getPointerOperandIndex(),
false,
1429 LI->getType(), LI->getAlign());
1430 }
else if (
StoreInst *SI = dyn_cast<StoreInst>(
I)) {
1434 SI->getValueOperand()->getType(),
SI->getAlign());
1438 Interesting.
emplace_back(
I, RMW->getPointerOperandIndex(),
true,
1439 RMW->getValOperand()->getType(), std::nullopt);
1443 Interesting.
emplace_back(
I, XCHG->getPointerOperandIndex(),
true,
1444 XCHG->getCompareOperand()->getType(),
1446 }
else if (
auto CI = dyn_cast<CallInst>(
I)) {
1447 switch (CI->getIntrinsicID()) {
1448 case Intrinsic::masked_load:
1449 case Intrinsic::masked_store:
1450 case Intrinsic::masked_gather:
1451 case Intrinsic::masked_scatter: {
1452 bool IsWrite = CI->getType()->isVoidTy();
1454 unsigned OpOffset = IsWrite ? 1 : 0;
1458 auto BasePtr = CI->getOperand(OpOffset);
1459 if (ignoreAccess(
I, BasePtr))
1461 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1464 if (
auto *
Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1465 Alignment =
Op->getMaybeAlignValue();
1466 Value *
Mask = CI->getOperand(2 + OpOffset);
1467 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, Mask);
1470 case Intrinsic::masked_expandload:
1471 case Intrinsic::masked_compressstore: {
1472 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1473 unsigned OpOffset = IsWrite ? 1 : 0;
1476 auto BasePtr = CI->getOperand(OpOffset);
1477 if (ignoreAccess(
I, BasePtr))
1480 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1483 Value *
Mask = CI->getOperand(1 + OpOffset);
1486 Value *ExtMask =
IB.CreateZExt(Mask, ExtTy);
1487 Value *EVL =
IB.CreateAddReduce(ExtMask);
1488 Value *TrueMask = ConstantInt::get(
Mask->getType(), 1);
1489 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1493 case Intrinsic::vp_load:
1494 case Intrinsic::vp_store:
1495 case Intrinsic::experimental_vp_strided_load:
1496 case Intrinsic::experimental_vp_strided_store: {
1497 auto *VPI = cast<VPIntrinsic>(CI);
1498 unsigned IID = CI->getIntrinsicID();
1499 bool IsWrite = CI->getType()->isVoidTy();
1502 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1503 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1504 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*
DL);
1505 Value *Stride =
nullptr;
1506 if (IID == Intrinsic::experimental_vp_strided_store ||
1507 IID == Intrinsic::experimental_vp_strided_load) {
1508 Stride = VPI->getOperand(PtrOpNo + 1);
1513 if (!isa<ConstantInt>(Stride) ||
1514 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1515 Alignment =
Align(1);
1517 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1518 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1522 case Intrinsic::vp_gather:
1523 case Intrinsic::vp_scatter: {
1524 auto *VPI = cast<VPIntrinsic>(CI);
1525 unsigned IID = CI->getIntrinsicID();
1526 bool IsWrite = IID == Intrinsic::vp_scatter;
1529 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1530 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1531 MaybeAlign Alignment = VPI->getPointerAlignment();
1532 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1533 VPI->getMaskParam(),
1534 VPI->getVectorLengthParam());
1538 for (
unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1540 ignoreAccess(
I, CI->getArgOperand(ArgNo)))
1542 Type *Ty = CI->getParamByValType(ArgNo);
1550 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1557 if (
ICmpInst *Cmp = dyn_cast<ICmpInst>(
I)) {
1558 if (!Cmp->isRelational())
1572 if (BO->getOpcode() != Instruction::Sub)
1585 if (!
G->hasInitializer())
1588 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().IsDynInit)
1594void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1597 FunctionCallee F = isa<ICmpInst>(
I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1598 Value *
Param[2] = {
I->getOperand(0),
I->getOperand(1)};
1599 for (
Value *&i : Param) {
1600 if (i->getType()->isPointerTy())
1603 RTCI.createRuntimeCall(IRB,
F, Param);
1609 TypeSize TypeStoreSize,
bool IsWrite,
1610 Value *SizeArgument,
bool UseCalls,
1611 uint32_t Exp, RuntimeCallInserter &RTCI) {
1616 switch (FixedSize) {
1622 if (!Alignment || *Alignment >= Granularity ||
1623 *Alignment >= FixedSize / 8)
1624 return Pass->instrumentAddress(
I, InsertBefore,
Addr, Alignment,
1625 FixedSize, IsWrite,
nullptr, UseCalls,
1629 Pass->instrumentUnusualSizeOrAlignment(
I, InsertBefore,
Addr, TypeStoreSize,
1630 IsWrite,
nullptr, UseCalls, Exp, RTCI);
1633void AddressSanitizer::instrumentMaskedLoadOrStore(
1636 MaybeAlign Alignment,
unsigned Granularity,
Type *OpType,
bool IsWrite,
1638 RuntimeCallInserter &RTCI) {
1639 auto *VTy = cast<VectorType>(OpType);
1640 TypeSize ElemTypeSize =
DL.getTypeStoreSizeInBits(VTy->getScalarType());
1641 auto Zero = ConstantInt::get(IntptrTy, 0);
1649 Value *IsEVLZero =
IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1651 IB.SetInsertPoint(LoopInsertBefore);
1653 EVL =
IB.CreateZExtOrTrunc(EVL, IntptrTy);
1656 Value *
EC =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1657 EVL =
IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1659 EVL =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1664 Stride =
IB.CreateZExtOrTrunc(Stride, IntptrTy);
1669 if (
auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1670 if (MaskElemC->isZero())
1681 Value *InstrumentedAddress;
1682 if (isa<VectorType>(
Addr->getType())) {
1684 cast<VectorType>(
Addr->getType())->getElementType()->isPointerTy() &&
1685 "Expected vector of pointer.");
1687 }
else if (Stride) {
1694 Alignment, Granularity, ElemTypeSize, IsWrite,
1695 SizeArgument, UseCalls, Exp, RTCI);
1702 RuntimeCallInserter &RTCI) {
1723 isSafeAccess(ObjSizeVis,
Addr,
O.TypeStoreSize)) {
1724 NumOptimizedAccessesToGlobalVar++;
1732 isSafeAccess(ObjSizeVis,
Addr,
O.TypeStoreSize)) {
1733 NumOptimizedAccessesToStackVar++;
1739 NumInstrumentedWrites++;
1741 NumInstrumentedReads++;
1743 unsigned Granularity = 1 << Mapping.Scale;
1745 instrumentMaskedLoadOrStore(
this,
DL, IntptrTy,
O.MaybeMask,
O.MaybeEVL,
1746 O.MaybeStride,
O.getInsn(),
Addr,
O.Alignment,
1747 Granularity,
O.OpType,
O.IsWrite,
nullptr,
1748 UseCalls, Exp, RTCI);
1751 Granularity,
O.TypeStoreSize,
O.IsWrite,
nullptr,
1752 UseCalls, Exp, RTCI);
1758 size_t AccessSizeIndex,
1759 Value *SizeArgument,
1761 RuntimeCallInserter &RTCI) {
1767 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1768 {
Addr, SizeArgument});
1770 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1771 {
Addr, SizeArgument, ExpVal});
1774 Call = RTCI.createRuntimeCall(
1775 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex],
Addr);
1777 Call = RTCI.createRuntimeCall(
1778 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {
Addr, ExpVal});
1781 Call->setCannotMerge();
1788 size_t Granularity =
static_cast<size_t>(1) << Mapping.Scale;
1790 Value *LastAccessedByte =
1791 IRB.
CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1793 if (TypeStoreSize / 8 > 1)
1795 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1803Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1805 uint32_t TypeStoreSize,
bool IsWrite,
Value *SizeArgument) {
1809 Type *PtrTy = cast<PointerType>(
Addr->getType()->getScalarType());
1812 return InsertBefore;
1817 Value *IsSharedOrPrivate = IRB.
CreateOr(IsShared, IsPrivate);
1819 Value *AddrSpaceZeroLanding =
1821 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1822 return InsertBefore;
1838 Trm->getParent()->setName(
"asan.report");
1849void AddressSanitizer::instrumentAddress(
Instruction *OrigIns,
1852 uint32_t TypeStoreSize,
bool IsWrite,
1853 Value *SizeArgument,
bool UseCalls,
1855 RuntimeCallInserter &RTCI) {
1856 if (TargetTriple.isAMDGPU()) {
1857 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore,
Addr,
1858 TypeStoreSize, IsWrite, SizeArgument);
1867 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1872 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1879 RTCI.createRuntimeCall(
1880 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1882 RTCI.createRuntimeCall(
1883 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1884 {AddrLong, ConstantInt::get(IRB.
getInt32Ty(), Exp)});
1891 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1893 std::max<uint64_t>(Alignment.
valueOrOne().
value() >> Mapping.Scale, 1);
1898 size_t Granularity = 1ULL << Mapping.Scale;
1901 bool GenSlowPath = (
ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1903 if (TargetTriple.isAMDGCN()) {
1905 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1908 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1909 }
else if (GenSlowPath) {
1914 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1917 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1932 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1941void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1943 TypeSize TypeStoreSize,
bool IsWrite,
Value *SizeArgument,
bool UseCalls,
1944 uint32_t Exp, RuntimeCallInserter &RTCI) {
1952 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1955 RTCI.createRuntimeCall(
1956 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
1970void ModuleAddressSanitizer::poisonOneInitializer(
Function &GlobalInit) {
1976 Value *ModuleNameAddr =
1978 IRB.
CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1981 for (
auto &BB : GlobalInit)
1986void ModuleAddressSanitizer::createInitializerPoisonCalls() {
1996 if (isa<ConstantAggregateZero>(
OP))
continue;
2002 auto *Priority = cast<ConstantInt>(CS->
getOperand(0));
2006 poisonOneInitializer(*
F);
2012ModuleAddressSanitizer::getExcludedAliasedGlobal(
const GlobalAlias &GA)
const {
2017 assert(CompileKernel &&
"Only expecting to be called when compiling kernel");
2024 return dyn_cast<GlobalVariable>(
C->stripPointerCastsAndAliases());
2029bool ModuleAddressSanitizer::shouldInstrumentGlobal(
GlobalVariable *
G)
const {
2030 Type *Ty =
G->getValueType();
2033 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().NoAddress)
2035 if (!Ty->
isSized())
return false;
2036 if (!
G->hasInitializer())
return false;
2038 if (
G->getAddressSpace() &&
2045 if (
G->isThreadLocal())
return false;
2047 if (
G->getAlign() && *
G->getAlign() > getMinRedzoneSizeForGlobal())
return false;
2053 if (!TargetTriple.isOSBinFormatCOFF()) {
2054 if (!
G->hasExactDefinition() ||
G->hasComdat())
2058 if (
G->isInterposable())
2062 if (
G->hasAvailableExternallyLinkage())
2069 switch (
C->getSelectionKind()) {
2080 if (
G->hasSection()) {
2090 if (Section ==
"llvm.metadata")
return false;
2097 if (
Section.starts_with(
".preinit_array") ||
2098 Section.starts_with(
".init_array") ||
2099 Section.starts_with(
".fini_array")) {
2105 if (TargetTriple.isOSBinFormatELF()) {
2107 [](
char c) {
return llvm::isAlnum(c) || c ==
'_'; }))
2119 if (TargetTriple.isOSBinFormatCOFF() &&
Section.contains(
'$')) {
2120 LLVM_DEBUG(
dbgs() <<
"Ignoring global in sorted section (contains '$'): "
2125 if (TargetTriple.isOSBinFormatMachO()) {
2127 unsigned TAA = 0, StubSize = 0;
2130 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2135 if (ParsedSegment ==
"__OBJC" ||
2136 (ParsedSegment ==
"__DATA" && ParsedSection.
starts_with(
"__objc_"))) {
2148 if (ParsedSegment ==
"__DATA" && ParsedSection ==
"__cfstring") {
2161 if (CompileKernel) {
2164 if (
G->getName().starts_with(
"__"))
2174bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection()
const {
2175 if (!TargetTriple.isOSBinFormatMachO())
2178 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2180 if (TargetTriple.isiOS() && !TargetTriple.isOSVersionLT(9))
2182 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2184 if (TargetTriple.isDriverKit())
2186 if (TargetTriple.isXROS())
2192StringRef ModuleAddressSanitizer::getGlobalMetadataSection()
const {
2193 switch (TargetTriple.getObjectFormat()) {
2203 "ModuleAddressSanitizer not implemented for object file format");
2210void ModuleAddressSanitizer::initializeCallbacks() {
2216 AsanUnpoisonGlobals =
2220 AsanRegisterGlobals =
M.getOrInsertFunction(
2222 AsanUnregisterGlobals =
M.getOrInsertFunction(
2227 AsanRegisterImageGlobals =
M.getOrInsertFunction(
2229 AsanUnregisterImageGlobals =
M.getOrInsertFunction(
2232 AsanRegisterElfGlobals =
2234 IntptrTy, IntptrTy, IntptrTy);
2235 AsanUnregisterElfGlobals =
2237 IntptrTy, IntptrTy, IntptrTy);
2242void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2247 if (!
G->hasName()) {
2251 G->setName(
genName(
"anon_global"));
2254 if (!InternalSuffix.
empty() &&
G->hasLocalLinkage()) {
2255 std::string
Name = std::string(
G->getName());
2256 Name += InternalSuffix;
2257 C =
M.getOrInsertComdat(
Name);
2259 C =
M.getOrInsertComdat(
G->getName());
2265 if (TargetTriple.isOSBinFormatCOFF()) {
2267 if (
G->hasPrivateLinkage())
2280ModuleAddressSanitizer::CreateMetadataGlobal(
Constant *Initializer,
2282 auto Linkage = TargetTriple.isOSBinFormatMachO()
2286 M, Initializer->
getType(),
false, Linkage, Initializer,
2288 Metadata->setSection(getGlobalMetadataSection());
2295Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2299 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2307void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2311 auto &
DL =
M.getDataLayout();
2314 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2315 Constant *Initializer = MetadataInitializers[i];
2319 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2325 unsigned SizeOfGlobalStruct =
DL.getTypeAllocSize(Initializer->
getType());
2327 "global metadata will not be padded appropriately");
2330 SetComdatForGlobalMetadata(
G,
Metadata,
"");
2335 if (!MetadataGlobals.empty())
2339void ModuleAddressSanitizer::instrumentGlobalsELF(
2342 const std::string &UniqueModuleId) {
2349 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2352 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2355 CreateMetadataGlobal(MetadataInitializers[i],
G->getName());
2357 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2360 if (UseComdatForGlobalsGC)
2361 SetComdatForGlobalMetadata(
G,
Metadata, UniqueModuleId);
2366 if (!MetadataGlobals.empty())
2383 "__start_" + getGlobalMetadataSection());
2387 "__stop_" + getGlobalMetadataSection());
2401 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2408void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2419 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2420 Constant *Initializer = MetadataInitializers[i];
2426 auto LivenessBinder =
2431 Twine(
"__asan_binder_") +
G->getName());
2432 Liveness->
setSection(
"__DATA,__asan_liveness,regular,live_support");
2433 LivenessGlobals[i] = Liveness;
2440 if (!LivenessGlobals.empty())
2462 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2467void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2471 unsigned N = ExtendedGlobals.
size();
2481 if (Mapping.Scale > 3)
2482 AllGlobals->setAlignment(
Align(1ULL << Mapping.Scale));
2487 ConstantInt::get(IntptrTy,
N)});
2493 IrbDtor.CreateCall(AsanUnregisterGlobals,
2495 ConstantInt::get(IntptrTy,
N)});
2504void ModuleAddressSanitizer::instrumentGlobals(
IRBuilder<> &IRB,
2509 if (CompileKernel) {
2510 for (
auto &GA :
M.aliases()) {
2512 AliasedGlobalExclusions.
insert(GV);
2517 for (
auto &
G :
M.globals()) {
2518 if (!AliasedGlobalExclusions.
count(&
G) && shouldInstrumentGlobal(&
G))
2522 size_t n = GlobalsToChange.
size();
2523 auto &
DL =
M.getDataLayout();
2537 IntptrTy, IntptrTy, IntptrTy);
2541 for (
size_t i = 0; i < n; i++) {
2545 if (
G->hasSanitizerMetadata())
2546 MD =
G->getSanitizerMetadata();
2551 std::string NameForGlobal =
G->getName().str();
2556 Type *Ty =
G->getValueType();
2557 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
2570 M, NewTy,
G->isConstant(), Linkage, NewInitializer,
"",
G,
2571 G->getThreadLocalMode(),
G->getAddressSpace());
2581 if (TargetTriple.isOSBinFormatMachO() && !
G->hasSection() &&
2583 auto Seq = dyn_cast<ConstantDataSequential>(
G->getInitializer());
2584 if (Seq && Seq->isCString())
2585 NewGlobal->
setSection(
"__TEXT,__asan_cstring,regular");
2596 G->replaceAllUsesWith(
2599 G->eraseFromParent();
2600 NewGlobals[i] = NewGlobal;
2605 bool CanUsePrivateAliases =
2606 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2607 TargetTriple.isOSBinFormatWasm();
2608 if (CanUsePrivateAliases && UsePrivateAlias) {
2611 InstrumentedGlobal =
2619 }
else if (UseOdrIndicator) {
2622 auto *ODRIndicatorSym =
2631 ODRIndicatorSym->setAlignment(
Align(1));
2632 ODRIndicator = ODRIndicatorSym;
2638 ConstantInt::get(IntptrTy, SizeInBytes),
2639 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2642 ConstantInt::get(IntptrTy, MD.
IsDynInit),
2648 Initializers[i] = Initializer;
2654 for (
size_t i = 0; i < n; i++) {
2656 if (
G->getName().empty())
continue;
2661 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2668 }
else if (n == 0) {
2671 *CtorComdat = TargetTriple.isOSBinFormatELF();
2673 *CtorComdat =
false;
2674 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2675 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2676 }
else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2677 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2679 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2685 createInitializerPoisonCalls();
2691ModuleAddressSanitizer::getRedzoneSizeForGlobal(
uint64_t SizeInBytes)
const {
2692 constexpr uint64_t kMaxRZ = 1 << 18;
2693 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2696 if (SizeInBytes <= MinRZ / 2) {
2700 RZ = MinRZ - SizeInBytes;
2703 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2706 if (SizeInBytes % MinRZ)
2707 RZ += MinRZ - (SizeInBytes % MinRZ);
2710 assert((RZ + SizeInBytes) % MinRZ == 0);
2715int ModuleAddressSanitizer::GetAsanVersion()
const {
2716 int LongSize =
M.getDataLayout().getPointerSizeInBits();
2721 Version += (LongSize == 32 && isAndroid);
2736bool ModuleAddressSanitizer::instrumentModule() {
2737 initializeCallbacks();
2742 if (CompileKernel) {
2747 std::string AsanVersion = std::to_string(GetAsanVersion());
2748 std::string VersionCheckName =
2750 std::tie(AsanCtorFunction, std::ignore) =
2753 {}, VersionCheckName);
2757 bool CtorComdat =
true;
2760 if (AsanCtorFunction) {
2761 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2762 instrumentGlobals(IRB, &CtorComdat);
2765 instrumentGlobals(IRB, &CtorComdat);
2774 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2775 if (AsanCtorFunction) {
2779 if (AsanDtorFunction) {
2784 if (AsanCtorFunction)
2786 if (AsanDtorFunction)
2797 for (
int Exp = 0;
Exp < 2;
Exp++) {
2798 for (
size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2799 const std::string TypeStr = AccessIsWrite ?
"store" :
"load";
2800 const std::string ExpStr =
Exp ?
"exp_" :
"";
2801 const std::string EndingStr = Recover ?
"_noabort" :
"";
2810 Args1.push_back(ExpType);
2811 if (
auto AK = TLI->getExtAttrForI32Param(
false)) {
2816 AsanErrorCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2820 AsanMemoryAccessCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2825 AccessSizeIndex++) {
2826 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2827 AsanErrorCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2828 M.getOrInsertFunction(
2832 AsanMemoryAccessCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2833 M.getOrInsertFunction(
2840 const std::string MemIntrinCallbackPrefix =
2844 AsanMemmove =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memmove",
2845 PtrTy, PtrTy, PtrTy, IntptrTy);
2846 AsanMemcpy =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memcpy", PtrTy,
2847 PtrTy, PtrTy, IntptrTy);
2848 AsanMemset =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memset",
2852 AsanHandleNoReturnFunc =
2855 AsanPtrCmpFunction =
2857 AsanPtrSubFunction =
2859 if (Mapping.InGlobal)
2860 AsanShadowGlobal =
M.getOrInsertGlobal(
"__asan_shadow",
2863 AMDGPUAddressShared =
2865 AMDGPUAddressPrivate =
2869bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(
Function &
F) {
2877 if (
F.getName().contains(
" load]")) {
2887bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(
Function &
F) {
2893 if (Mapping.InGlobal) {
2901 LocalDynamicShadow =
2902 IRB.
CreateCall(Asm, {AsanShadowGlobal},
".asan.shadow");
2904 LocalDynamicShadow =
2908 Value *GlobalDynamicAddress =
F.getParent()->getOrInsertGlobal(
2910 LocalDynamicShadow = IRB.
CreateLoad(IntptrTy, GlobalDynamicAddress);
2915void AddressSanitizer::markEscapedLocalAllocas(
Function &
F) {
2920 assert(ProcessedAllocas.empty() &&
"must process localescape before allocas");
2924 if (!
F.getParent()->getFunction(
"llvm.localescape"))
return;
2930 if (
II &&
II->getIntrinsicID() == Intrinsic::localescape) {
2932 for (
Value *Arg :
II->args()) {
2933 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2935 "non-static alloca arg to localescape");
2936 ProcessedAllocas[AI] =
false;
2943bool AddressSanitizer::suppressInstrumentationSiteForDebug(
int &Instrumented) {
2944 bool ShouldInstrument =
2948 return !ShouldInstrument;
2951bool AddressSanitizer::instrumentFunction(
Function &
F,
2957 if (
F.getName().starts_with(
"__asan_"))
return false;
2958 if (
F.isPresplitCoroutine())
2961 bool FunctionModified =
false;
2966 if (maybeInsertAsanInitAtFunctionEntry(
F))
2967 FunctionModified =
true;
2970 if (!
F.hasFnAttribute(Attribute::SanitizeAddress))
return FunctionModified;
2972 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
2973 return FunctionModified;
2977 initializeCallbacks(TLI);
2979 FunctionStateRAII CleanupObj(
this);
2981 RuntimeCallInserter RTCI(
F);
2983 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(
F);
2987 markEscapedLocalAllocas(
F);
2999 for (
auto &BB :
F) {
3001 TempsToInstrument.
clear();
3002 int NumInsnsPerBB = 0;
3003 for (
auto &Inst : BB) {
3004 if (LooksLikeCodeInBug11395(&Inst))
return false;
3011 if (!InterestingOperands.
empty()) {
3012 for (
auto &Operand : InterestingOperands) {
3018 if (Operand.MaybeMask) {
3022 if (!TempsToInstrument.
insert(
Ptr).second)
3026 OperandsToInstrument.
push_back(Operand);
3033 PointerComparisonsOrSubtracts.
push_back(&Inst);
3039 if (
auto *CB = dyn_cast<CallBase>(&Inst)) {
3041 TempsToInstrument.
clear();
3045 if (
CallInst *CI = dyn_cast<CallInst>(&Inst))
3052 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3053 OperandsToInstrument.
size() + IntrinToInstrument.
size() >
3054 (
unsigned)InstrumentationWithCallsThreshold);
3061 int NumInstrumented = 0;
3062 for (
auto &Operand : OperandsToInstrument) {
3063 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3064 instrumentMop(ObjSizeVis, Operand, UseCalls,
3065 F.getDataLayout(), RTCI);
3066 FunctionModified =
true;
3068 for (
auto *Inst : IntrinToInstrument) {
3069 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3070 instrumentMemIntrinsic(Inst, RTCI);
3071 FunctionModified =
true;
3074 FunctionStackPoisoner FSP(
F, *
this, RTCI);
3075 bool ChangedStack = FSP.runOnFunction();
3079 for (
auto *CI : NoReturnCalls) {
3081 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3084 for (
auto *Inst : PointerComparisonsOrSubtracts) {
3085 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3086 FunctionModified =
true;
3089 if (ChangedStack || !NoReturnCalls.empty())
3090 FunctionModified =
true;
3092 LLVM_DEBUG(
dbgs() <<
"ASAN done instrumenting: " << FunctionModified <<
" "
3095 return FunctionModified;
3101bool AddressSanitizer::LooksLikeCodeInBug11395(
Instruction *
I) {
3102 if (LongSize != 32)
return false;
3111void FunctionStackPoisoner::initializeCallbacks(
Module &M) {
3115 const char *MallocNameTemplate =
3120 std::string Suffix = itostr(
Index);
3121 AsanStackMallocFunc[
Index] =
M.getOrInsertFunction(
3122 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3123 AsanStackFreeFunc[
Index] =
3128 if (ASan.UseAfterScope) {
3129 AsanPoisonStackMemoryFunc =
M.getOrInsertFunction(
3131 AsanUnpoisonStackMemoryFunc =
M.getOrInsertFunction(
3135 for (
size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3136 0xf3, 0xf5, 0xf8}) {
3137 std::ostringstream
Name;
3139 Name << std::setw(2) << std::setfill(
'0') << std::hex << Val;
3140 AsanSetShadowFunc[Val] =
3141 M.getOrInsertFunction(
Name.str(), IRB.
getVoidTy(), IntptrTy, IntptrTy);
3144 AsanAllocaPoisonFunc =
M.getOrInsertFunction(
3146 AsanAllocasUnpoisonFunc =
M.getOrInsertFunction(
3152 size_t Begin,
size_t End,
3154 Value *ShadowBase) {
3158 const size_t LargestStoreSizeInBytes =
3159 std::min<size_t>(
sizeof(
uint64_t), ASan.LongSize / 8);
3161 const bool IsLittleEndian =
F.getDataLayout().isLittleEndian();
3167 for (
size_t i = Begin; i <
End;) {
3168 if (!ShadowMask[i]) {
3174 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3176 while (StoreSizeInBytes >
End - i)
3177 StoreSizeInBytes /= 2;
3180 for (
size_t j = StoreSizeInBytes - 1;
j && !ShadowMask[i +
j]; --
j) {
3181 while (j <= StoreSizeInBytes / 2)
3182 StoreSizeInBytes /= 2;
3186 for (
size_t j = 0;
j < StoreSizeInBytes;
j++) {
3188 Val |= (
uint64_t)ShadowBytes[i + j] << (8 * j);
3190 Val = (Val << 8) | ShadowBytes[i + j];
3199 i += StoreSizeInBytes;
3206 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.
size(), IRB, ShadowBase);
3211 size_t Begin,
size_t End,
3214 size_t Done = Begin;
3215 for (
size_t i = Begin, j = Begin + 1; i <
End; i =
j++) {
3216 if (!ShadowMask[i]) {
3220 uint8_t Val = ShadowBytes[i];
3221 if (!AsanSetShadowFunc[Val])
3225 for (;
j <
End && ShadowMask[
j] && Val == ShadowBytes[
j]; ++
j) {
3228 if (j - i >= ASan.MaxInlinePoisoningSize) {
3229 copyToShadowInline(ShadowMask, ShadowBytes,
Done, i, IRB, ShadowBase);
3230 RTCI.createRuntimeCall(
3231 IRB, AsanSetShadowFunc[Val],
3232 {IRB.
CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3233 ConstantInt::get(IntptrTy, j - i)});
3238 copyToShadowInline(ShadowMask, ShadowBytes,
Done,
End, IRB, ShadowBase);
3246 for (
int i = 0;; i++, MaxSize *= 2)
3247 if (LocalStackSize <= MaxSize)
return i;
3251void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3253 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3261 if (Arg.hasByValAttr()) {
3262 Type *Ty = Arg.getParamByValType();
3263 const Align Alignment =
3264 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3268 (Arg.hasName() ? Arg.getName() :
"Arg" +
Twine(Arg.getArgNo())) +
3271 Arg.replaceAllUsesWith(AI);
3273 uint64_t AllocSize =
DL.getTypeAllocSize(Ty);
3274 IRB.
CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3282 Value *ValueIfFalse) {
3285 PHI->addIncoming(ValueIfFalse, CondBlock);
3287 PHI->addIncoming(ValueIfTrue, ThenBlock);
3291Value *FunctionStackPoisoner::createAllocaForLayout(
3300 nullptr,
"MyAlloca");
3309void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3312 DynamicAllocaLayout = IRB.
CreateAlloca(IntptrTy,
nullptr);
3317void FunctionStackPoisoner::processDynamicAllocas() {
3324 for (
const auto &APC : DynamicAllocaPoisonCallVec) {
3327 assert(ASan.isInterestingAlloca(*APC.AI));
3328 assert(!APC.AI->isStaticAlloca());
3331 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3338 createDynamicAllocasInitStorage();
3339 for (
auto &AI : DynamicAllocaVec)
3340 handleDynamicAllocaCall(AI);
3341 unpoisonDynamicAllocas();
3353 for (
Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
3363 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3365 if (
auto *Store = dyn_cast<StoreInst>(It)) {
3369 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3370 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3373 Value *Val = Store->getValueOperand();
3374 bool IsDirectArgInit = isa<Argument>(Val);
3375 bool IsArgInitViaCast =
3376 isa<CastInst>(Val) &&
3377 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3380 Val == It->getPrevNonDebugInstruction();
3381 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3385 if (IsArgInitViaCast)
3386 InitInsts.
push_back(cast<Instruction>(Val));
3397void FunctionStackPoisoner::processStaticAllocas() {
3398 if (AllocaVec.
empty()) {
3403 int StackMallocIdx = -1;
3405 if (
auto SP =
F.getSubprogram())
3406 EntryDebugLocation =
3415 auto InsBeforeB = InsBefore->
getParent();
3416 assert(InsBeforeB == &
F.getEntryBlock());
3417 for (
auto *AI : StaticAllocasToMoveUp)
3428 ArgInitInst->moveBefore(InsBefore);
3431 if (LocalEscapeCall) LocalEscapeCall->
moveBefore(InsBefore);
3437 ASan.getAllocaSizeInBytes(*AI),
3448 uint64_t Granularity = 1ULL << Mapping.Scale;
3449 uint64_t MinHeaderSize = std::max((
uint64_t)ASan.LongSize / 2, Granularity);
3455 for (
auto &
Desc : SVD)
3459 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3462 assert(ASan.isInterestingAlloca(*APC.AI));
3463 assert(APC.AI->isStaticAlloca());
3468 if (
const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3469 if (LifetimeLoc->getFile() == FnLoc->getFile())
3470 if (
unsigned Line = LifetimeLoc->getLine())
3471 Desc.Line = std::min(
Desc.Line ?
Desc.Line : Line, Line);
3477 LLVM_DEBUG(
dbgs() << DescriptionString <<
" --- " <<
L.FrameSize <<
"\n");
3479 bool DoStackMalloc =
3489 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3490 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3492 Value *StaticAlloca =
3493 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L,
false);
3496 Value *LocalStackBase;
3497 Value *LocalStackBaseAlloca;
3500 if (DoStackMalloc) {
3501 LocalStackBaseAlloca =
3502 IRB.
CreateAlloca(IntptrTy,
nullptr,
"asan_local_stack_base");
3509 Constant *OptionDetectUseAfterReturn =
F.getParent()->getOrInsertGlobal(
3519 Value *FakeStackValue =
3520 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3521 ConstantInt::get(IntptrTy, LocalStackSize));
3523 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3524 ConstantInt::get(IntptrTy, 0));
3532 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3533 ConstantInt::get(IntptrTy, LocalStackSize));
3535 Value *NoFakeStack =
3540 Value *AllocaValue =
3541 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L,
true) : StaticAlloca;
3544 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3545 IRB.
CreateStore(LocalStackBase, LocalStackBaseAlloca);
3550 FakeStack = ConstantInt::get(IntptrTy, 0);
3552 DoDynamicAlloca ? createAllocaForLayout(IRB, L,
true) : StaticAlloca;
3553 LocalStackBaseAlloca = LocalStackBase;
3559 Value *LocalStackBaseAllocaPtr =
3560 isa<PtrToIntInst>(LocalStackBaseAlloca)
3561 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3562 : LocalStackBaseAlloca;
3563 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3564 "Variable descriptions relative to ASan stack base will be dropped");
3567 for (
const auto &
Desc : SVD) {
3572 IRB.
CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
Desc.Offset)),
3585 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3595 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3602 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3605 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3607 if (!StaticAllocaPoisonCallVec.empty()) {
3611 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3614 size_t Begin =
Desc.Offset /
L.Granularity;
3615 size_t End = Begin + (APC.Size +
L.Granularity - 1) /
L.Granularity;
3618 copyToShadow(ShadowAfterScope,
3619 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin,
End,
3633 if (DoStackMalloc) {
3634 assert(StackMallocIdx >= 0);
3651 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3653 ShadowAfterReturn.
resize(ClassSize /
L.Granularity,
3655 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3657 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3659 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3660 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3661 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3662 IRBPoison.CreateStore(
3664 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3667 RTCI.createRuntimeCall(
3668 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3669 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3673 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3675 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3680 for (
auto *AI : AllocaVec)
3688 Value *SizeArg = ConstantInt::get(IntptrTy,
Size);
3689 RTCI.createRuntimeCall(
3690 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3691 {AddrArg, SizeArg});
3702void FunctionStackPoisoner::handleDynamicAllocaCall(
AllocaInst *AI) {
3710 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3716 const unsigned ElementSize =
3720 ConstantInt::get(IntptrTy, ElementSize));
3748 ConstantInt::get(IntptrTy, Alignment.
value()));
3751 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
static const uint64_t kEmscriptenShadowOffset
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
This defines the Use class.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
@ SameSize
The data referenced by the COMDAT must be the same size.
@ Any
The linker may choose any COMDAT.
@ NoDeduplicate
No deduplication is performed.
@ ExactMatch
The data referenced by the COMDAT must be the same.
ConstantArray - Constant Array Declarations.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
DILocation * get() const
Get the underlying DILocation.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const Constant * getAliasee() const
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
void setComdat(Comdat *C)
void setSection(StringRef S)
Change the section for this global.
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
Module * getParent()
Get the module that this global value is contained inside of...
@ HiddenVisibility
The GV is hidden.
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ CommonLinkage
Tentative definitions.
@ InternalLinkage
Rename collisions when linking (static functions).
@ AvailableExternallyLinkage
Available for inspection, not emission.
@ ExternalWeakLinkage
ExternalWeak linkage description.
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Base class for instruction visitors.
RetTy visitCallBase(CallBase &I)
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
RetTy visitIntrinsicInst(IntrinsicInst &I)
void visit(Iterator Start, Iterator End)
RetTy visitReturnInst(ReturnInst &I)
RetTy visitAllocaInst(AllocaInst &I)
RetTy visitResumeInst(ResumeInst &I)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This is the common base class for memset/memcpy/memmove.
A Module instance is used to store all the information related to an LLVM module.
Evaluate the size and offset of an object pointed to by a Value* statically.
SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Pass interface - Implemented by all 'passes'.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Triple - Helper class for working with autoconf configuration names.
bool isAndroidVersionLT(unsigned Major) const
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
bool isDriverKit() const
Is this an Apple DriverKit triple.
bool isAndroid() const
Tests whether the target is Android.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isOSWindows() const
Tests whether the OS is Windows.
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isOSLinux() const
Tests whether the OS is Linux.
bool isMacOSX() const
Is this a Mac OS X triple.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
bool isWatchOS() const
Is this an Apple watchOS triple.
bool isiOS() const
Is this an iOS triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt32Ty(LLVMContext &C)
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Linkage
Describes symbol linkage. This can be used to resolve definition clashes.
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
This is an optimization pass for GlobalISel generic memory operations.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled at runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, Instruction *InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
@ Dynamic
Denotes mode unknown at compile time.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AsanCtorKind
Types of ASan module constructors supported.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
int InstrumentationWithCallsThreshold
uint32_t MaxInlinePoisoningSize
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Various options to control the behavior of getObjectSize.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.