94#define DEBUG_TYPE "asan"
100 std::numeric_limits<uint64_t>::max();
141 "__asan_unregister_image_globals";
154 "__asan_stack_malloc_always_";
168 "__asan_option_detect_stack_use_after_return";
171 "__asan_shadow_memory_dynamic_address";
197 "asan-kernel",
cl::desc(
"Enable KernelAddressSanitizer instrumentation"),
202 cl::desc(
"Enable recovery mode (continue-after-error)."),
206 "asan-guard-against-version-mismatch",
212 cl::desc(
"instrument read instructions"),
216 "asan-instrument-writes",
cl::desc(
"instrument write instructions"),
225 "asan-instrument-atomics",
235 "asan-always-slow-path",
240 "asan-force-dynamic-shadow",
241 cl::desc(
"Load shadow address into a local variable for each function"),
246 cl::desc(
"Access dynamic shadow through an ifunc global on "
247 "platforms that support this"),
251 "asan-with-ifunc-suppress-remat",
252 cl::desc(
"Suppress rematerialization of dynamic shadow address by passing "
253 "it through inline asm in prologue."),
261 "asan-max-ins-per-bb",
cl::init(10000),
262 cl::desc(
"maximal number of instructions to instrument in any given BB"),
269 "asan-max-inline-poisoning-size",
271 "Inline shadow poisoning for blocks up to the given size in bytes."),
275 "asan-use-after-return",
276 cl::desc(
"Sets the mode of detection for stack-use-after-return."),
278 clEnumValN(AsanDetectStackUseAfterReturnMode::Never,
"never",
279 "Never detect stack use after return."),
281 AsanDetectStackUseAfterReturnMode::Runtime,
"runtime",
282 "Detect stack use after return if "
283 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284 clEnumValN(AsanDetectStackUseAfterReturnMode::Always,
"always",
285 "Always detect stack use after return.")),
289 cl::desc(
"Create redzones for byval "
290 "arguments (extra copy "
295 cl::desc(
"Check stack-use-after-scope"),
304 cl::desc(
"Handle C++ initializer order"),
308 "asan-detect-invalid-pointer-pair",
313 "asan-detect-invalid-pointer-cmp",
318 "asan-detect-invalid-pointer-sub",
323 "asan-realign-stack",
324 cl::desc(
"Realign stack to the value of this flag (power of two)"),
328 "asan-instrumentation-with-call-threshold",
329 cl::desc(
"If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
335 "asan-memory-access-callback-prefix",
340 "asan-kernel-mem-intrinsic-prefix",
346 cl::desc(
"instrument dynamic allocas"),
350 "asan-skip-promotable-allocas",
355 "asan-constructor-kind",
356 cl::desc(
"Sets the ASan constructor kind"),
359 "Use global constructors")),
366 cl::desc(
"scale of asan shadow mapping"),
371 cl::desc(
"offset of asan shadow mapping [EXPERIMENTAL]"),
385 "asan-opt-same-temp",
cl::desc(
"Instrument the same temp just once"),
389 cl::desc(
"Don't instrument scalar globals"),
393 "asan-opt-stack",
cl::desc(
"Don't instrument scalar stack variables"),
397 "asan-stack-dynamic-alloca",
402 "asan-force-experiment",
408 cl::desc(
"Use private aliases for global variables"),
413 cl::desc(
"Use odr indicators to improve ODR reporting"),
418 cl::desc(
"Use linker features to support dead "
419 "code stripping of globals"),
426 cl::desc(
"Place ASan constructors in comdat sections"),
430 "asan-destructor-kind",
431 cl::desc(
"Sets the ASan destructor kind. The default is to use the value "
432 "provided to the pass constructor"),
435 "Use global destructors")),
455STATISTIC(NumInstrumentedReads,
"Number of instrumented reads");
456STATISTIC(NumInstrumentedWrites,
"Number of instrumented writes");
458 "Number of optimized accesses to global vars");
460 "Number of optimized accesses to stack vars");
469struct ShadowMapping {
480 bool IsAndroid = TargetTriple.
isAndroid();
483 bool IsMacOS = TargetTriple.
isMacOSX();
486 bool IsPS = TargetTriple.
isPS();
492 bool IsMIPSN32ABI = TargetTriple.
isABIN32();
493 bool IsMIPS32 = TargetTriple.
isMIPS32();
494 bool IsMIPS64 = TargetTriple.
isMIPS64();
495 bool IsArmOrThumb = TargetTriple.
isARM() || TargetTriple.
isThumb();
503 bool IsAMDGPU = TargetTriple.
isAMDGPU();
505 ShadowMapping Mapping;
512 if (LongSize == 32) {
515 else if (IsMIPSN32ABI)
527 else if (IsEmscripten)
540 else if (IsFreeBSD && IsAArch64)
542 else if (IsFreeBSD && !IsMIPS64) {
547 }
else if (IsNetBSD) {
554 else if (IsLinux && IsX86_64) {
560 }
else if (IsWindows && IsX86_64) {
566 else if (IsMacOS && IsAArch64)
570 else if (IsLoongArch64)
594 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
595 !IsRISCV64 && !IsLoongArch64 &&
596 !(Mapping.Offset & (Mapping.Offset - 1)) &&
598 bool IsAndroidWithIfuncSupport =
600 Mapping.InGlobal =
ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
608 int *MappingScale,
bool *OrShadowOffset) {
610 *ShadowBase = Mapping.Offset;
611 *MappingScale = Mapping.Scale;
612 *OrShadowOffset = Mapping.OrShadowOffset;
626 AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
627 CompileKernel(CompileKernel) {}
634 return std::max(32U, 1U << MappingScale);
// NOTE(review): this chunk is an elided extraction -- many original lines are
// missing between the fragments below; comments describe only what is visible.
// Helper that creates ASan runtime calls and records them so the destructor
// can post-process them (visibly: recreating calls that landed inside an EH
// funclet so they carry the required operand bundle).
653class RuntimeCallInserter {
// Tracking is off by default and enabled in the constructor under some
// condition that is elided here -- presumably EH-related; TODO confirm.
655 bool TrackInsertedCalls =
false;
659 RuntimeCallInserter(
Function &Fn) : OwnerFn(&Fn) {
663 TrackInsertedCalls =
true;
// Destructor fix-up pass over every tracked call.
667 ~RuntimeCallInserter() {
668 if (InsertedCalls.
empty())
670 assert(TrackInsertedCalls &&
"Calls were wrongly tracked");
673 for (
CallInst *CI : InsertedCalls) {
675 assert(BB &&
"Instruction doesn't belong to a BasicBlock");
677 "Instruction doesn't belong to the expected Function!");
// Each call's block must have exactly one funclet "color".
685 if (Colors.
size() != 1) {
687 "Instruction's BasicBlock is not monochromatic");
// Calls inside an EH pad are recreated (with an operand bundle, per the
// elided lines above) and spliced in place of the original: uses are
// redirected, metadata copied, and the old call erased.
694 if (EHPad && EHPad->
isEHPad()) {
698 OB, CI->getIterator());
699 NewCall->copyMetadata(*CI);
700 CI->replaceAllUsesWith(NewCall);
701 CI->eraseFromParent();
// Tail of createRuntimeCall (body elided): remember the inserted call for
// the destructor's fix-up pass when tracking is enabled.
712 if (TrackInsertedCalls)
713 InsertedCalls.push_back(Inst);
719struct AddressSanitizer {
721 int InstrumentationWithCallsThreshold,
722 uint32_t MaxInlinePoisoningSize,
bool CompileKernel =
false,
723 bool Recover =
false,
bool UseAfterScope =
false,
725 AsanDetectStackUseAfterReturnMode::Runtime)
734 InstrumentationWithCallsThreshold(
737 : InstrumentationWithCallsThreshold),
740 : MaxInlinePoisoningSize) {
741 C = &(
M.getContext());
742 DL = &
M.getDataLayout();
743 LongSize =
M.getDataLayout().getPointerSizeInBits();
745 PtrTy = PointerType::getUnqual(*C);
747 TargetTriple =
Triple(
M.getTargetTriple());
751 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
759 bool isInterestingAlloca(
const AllocaInst &AI);
767 const DataLayout &DL, RuntimeCallInserter &RTCI);
768 void instrumentPointerComparisonOrSubtraction(
Instruction *
I,
769 RuntimeCallInserter &RTCI);
772 uint32_t TypeStoreSize,
bool IsWrite,
774 RuntimeCallInserter &RTCI);
777 uint32_t TypeStoreSize,
bool IsWrite,
778 Value *SizeArgument);
783 TypeSize TypeStoreSize,
bool IsWrite,
784 Value *SizeArgument,
bool UseCalls,
786 RuntimeCallInserter &RTCI);
787 void instrumentMaskedLoadOrStore(AddressSanitizer *
Pass,
const DataLayout &DL,
791 Type *OpType,
bool IsWrite,
792 Value *SizeArgument,
bool UseCalls,
793 uint32_t Exp, RuntimeCallInserter &RTCI);
797 bool IsWrite,
size_t AccessSizeIndex,
799 RuntimeCallInserter &RTCI);
800 void instrumentMemIntrinsic(
MemIntrinsic *
MI, RuntimeCallInserter &RTCI);
802 bool suppressInstrumentationSiteForDebug(
int &Instrumented);
804 bool maybeInsertAsanInitAtFunctionEntry(
Function &
F);
805 bool maybeInsertDynamicShadowAtFunctionEntry(
Function &
F);
806 void markEscapedLocalAllocas(
Function &
F);
809 friend struct FunctionStackPoisoner;
819 struct FunctionStateRAII {
820 AddressSanitizer *
Pass;
822 FunctionStateRAII(AddressSanitizer *
Pass) :
Pass(
Pass) {
824 "last pass forgot to clear cache");
828 ~FunctionStateRAII() {
829 Pass->LocalDynamicShadow =
nullptr;
830 Pass->ProcessedAllocas.clear();
846 ShadowMapping Mapping;
860 Value *LocalDynamicShadow =
nullptr;
866 int InstrumentationWithCallsThreshold;
870class ModuleAddressSanitizer {
872 ModuleAddressSanitizer(
Module &M,
bool InsertVersionCheck,
873 bool CompileKernel =
false,
bool Recover =
false,
874 bool UseGlobalsGC =
true,
bool UseOdrIndicator =
true,
882 : InsertVersionCheck),
884 UseGlobalsGC(UseGlobalsGC &&
ClUseGlobalsGC && !this->CompileKernel),
899 UseCtorComdat(UseGlobalsGC &&
ClWithComdat && !this->CompileKernel),
900 DestructorKind(DestructorKind),
904 C = &(
M.getContext());
905 int LongSize =
M.getDataLayout().getPointerSizeInBits();
907 PtrTy = PointerType::getUnqual(*C);
908 TargetTriple =
Triple(
M.getTargetTriple());
913 assert(this->DestructorKind != AsanDtorKind::Invalid);
916 bool instrumentModule();
919 void initializeCallbacks();
921 void instrumentGlobals(
IRBuilder<> &IRB,
bool *CtorComdat);
928 const std::string &UniqueModuleId);
933 InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB,
945 bool ShouldUseMachOGlobalsSection()
const;
946 StringRef getGlobalMetadataSection()
const;
947 void poisonOneInitializer(
Function &GlobalInit);
948 void createInitializerPoisonCalls();
949 uint64_t getMinRedzoneSizeForGlobal()
const {
953 int GetAsanVersion()
const;
958 bool InsertVersionCheck;
961 bool UsePrivateAlias;
962 bool UseOdrIndicator;
970 ShadowMapping Mapping;
980 Function *AsanCtorFunction =
nullptr;
981 Function *AsanDtorFunction =
nullptr;
994struct FunctionStackPoisoner :
public InstVisitor<FunctionStackPoisoner> {
996 AddressSanitizer &ASan;
997 RuntimeCallInserter &RTCI;
1002 ShadowMapping Mapping;
1011 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1015 struct AllocaPoisonCall {
1023 bool HasUntracedLifetimeIntrinsic =
false;
1030 bool HasInlineAsm =
false;
1031 bool HasReturnsTwiceCall =
false;
1034 FunctionStackPoisoner(
Function &F, AddressSanitizer &ASan,
1035 RuntimeCallInserter &RTCI)
1036 :
F(
F), ASan(ASan), RTCI(RTCI),
1038 IntptrTy(ASan.IntptrTy), IntptrPtrTy(
PointerType::
get(IntptrTy, 0)),
1039 Mapping(ASan.Mapping),
1048 copyArgsPassedByValToAllocas();
1053 if (AllocaVec.
empty() && DynamicAllocaVec.
empty())
return false;
1055 initializeCallbacks(*
F.getParent());
1057 if (HasUntracedLifetimeIntrinsic) {
1061 StaticAllocaPoisonCallVec.
clear();
1062 DynamicAllocaPoisonCallVec.
clear();
1065 processDynamicAllocas();
1066 processStaticAllocas();
1077 void copyArgsPassedByValToAllocas();
1082 void processStaticAllocas();
1083 void processDynamicAllocas();
1085 void createDynamicAllocasInitStorage();
1103 void unpoisonDynamicAllocasBeforeInst(
Instruction *InstBefore,
1104 Value *SavedStack) {
1111 if (!isa<ReturnInst>(InstBefore)) {
1113 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1119 RTCI.createRuntimeCall(
1120 IRB, AsanAllocasUnpoisonFunc,
1121 {IRB.
CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1125 void unpoisonDynamicAllocas() {
1127 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1129 for (
Instruction *StackRestoreInst : StackRestoreVec)
1130 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1131 StackRestoreInst->getOperand(0));
1144 void handleDynamicAllocaCall(
AllocaInst *AI);
1150 const auto *STy = dyn_cast<StructType>(AllocaType);
1151 if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1152 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1156 if (AllocaVec.
empty())
1174 if (
ID == Intrinsic::stackrestore) StackRestoreVec.
push_back(&
II);
1175 if (
ID == Intrinsic::localescape) LocalEscapeCall = &
II;
1176 if (!ASan.UseAfterScope)
1178 if (!
II.isLifetimeStartOrEnd())
1181 auto *
Size = cast<ConstantInt>(
II.getArgOperand(0));
1183 if (
Size->isMinusOne())
return;
1186 const uint64_t SizeValue =
Size->getValue().getLimitedValue();
1187 if (SizeValue == ~0ULL ||
1195 HasUntracedLifetimeIntrinsic =
true;
1199 if (!ASan.isInterestingAlloca(*AI))
1201 bool DoPoison = (
ID == Intrinsic::lifetime_end);
1202 AllocaPoisonCall APC = {&
II, AI, SizeValue, DoPoison};
1204 StaticAllocaPoisonCallVec.
push_back(APC);
1206 DynamicAllocaPoisonCallVec.
push_back(APC);
1210 if (
CallInst *CI = dyn_cast<CallInst>(&CB)) {
1211 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1212 HasReturnsTwiceCall |= CI->canReturnTwice();
1217 void initializeCallbacks(
Module &M);
1244 OS, MapClassName2PassName);
1256 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1257 ConstructorKind(ConstructorKind) {}
1266 ModuleAddressSanitizer ModuleSanitizer(
1268 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1274 AddressSanitizer FunctionSanitizer(
1279 Modified |= FunctionSanitizer.instrumentFunction(
F, &TLI);
1281 Modified |= ModuleSanitizer.instrumentModule();
1302 if (
G->getName().starts_with(
"llvm.") ||
1304 G->getName().starts_with(
"__llvm_gcov_ctr") ||
1306 G->getName().starts_with(
"__llvm_rtti_proxy"))
1319 Type *PtrTy = cast<PointerType>(
Addr->getType()->getScalarType());
1321 if (AddrSpace == 3 || AddrSpace == 5)
1328 Shadow = IRB.
CreateLShr(Shadow, Mapping.Scale);
1329 if (Mapping.Offset == 0)
return Shadow;
1332 if (LocalDynamicShadow)
1333 ShadowBase = LocalDynamicShadow;
1335 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1336 if (Mapping.OrShadowOffset)
1337 return IRB.
CreateOr(Shadow, ShadowBase);
1339 return IRB.
CreateAdd(Shadow, ShadowBase);
1344 RuntimeCallInserter &RTCI) {
1346 if (isa<MemTransferInst>(
MI)) {
1347 RTCI.createRuntimeCall(
1348 IRB, isa<MemMoveInst>(
MI) ? AsanMemmove : AsanMemcpy,
1352 }
else if (isa<MemSetInst>(
MI)) {
1353 RTCI.createRuntimeCall(
1359 MI->eraseFromParent();
// Decides whether the given alloca should be instrumented; the verdict is
// memoized per-alloca in ProcessedAllocas so repeated queries are O(1).
1363bool AddressSanitizer::isInterestingAlloca(
const AllocaInst &AI) {
1364 auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
// Fast path: reuse the cached answer for an alloca we already classified.
1366 if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
1367 return PreviouslySeenAllocaInfo->getSecond();
// The full list of criteria is elided in this extraction; visible here is
// only the final conjunct -- skip allocas that the stack-safety analysis
// (SSGI) has proven safe to leave uninstrumented.
1369 bool IsInteresting =
1382 !(SSGI && SSGI->
isSafe(AI)));
// Cache and return the computed verdict.
1384 ProcessedAllocas[&AI] = IsInteresting;
1385 return IsInteresting;
1390 Type *PtrTy = cast<PointerType>(
Ptr->getType()->getScalarType());
1399 if (
Ptr->isSwiftError())
1405 if (
auto AI = dyn_cast_or_null<AllocaInst>(
Ptr))
1416void AddressSanitizer::getInterestingMemoryOperands(
1419 if (LocalDynamicShadow ==
I)
1422 if (
LoadInst *LI = dyn_cast<LoadInst>(
I)) {
1425 Interesting.
emplace_back(
I, LI->getPointerOperandIndex(),
false,
1426 LI->getType(), LI->getAlign());
1427 }
else if (
StoreInst *SI = dyn_cast<StoreInst>(
I)) {
1431 SI->getValueOperand()->getType(),
SI->getAlign());
1435 Interesting.
emplace_back(
I, RMW->getPointerOperandIndex(),
true,
1436 RMW->getValOperand()->getType(), std::nullopt);
1440 Interesting.
emplace_back(
I, XCHG->getPointerOperandIndex(),
true,
1441 XCHG->getCompareOperand()->getType(),
1443 }
else if (
auto CI = dyn_cast<CallInst>(
I)) {
1444 switch (CI->getIntrinsicID()) {
1445 case Intrinsic::masked_load:
1446 case Intrinsic::masked_store:
1447 case Intrinsic::masked_gather:
1448 case Intrinsic::masked_scatter: {
1449 bool IsWrite = CI->getType()->isVoidTy();
1451 unsigned OpOffset = IsWrite ? 1 : 0;
1455 auto BasePtr = CI->getOperand(OpOffset);
1456 if (ignoreAccess(
I, BasePtr))
1458 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1461 if (
auto *
Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1462 Alignment =
Op->getMaybeAlignValue();
1463 Value *
Mask = CI->getOperand(2 + OpOffset);
1464 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, Mask);
1467 case Intrinsic::masked_expandload:
1468 case Intrinsic::masked_compressstore: {
1469 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1470 unsigned OpOffset = IsWrite ? 1 : 0;
1473 auto BasePtr = CI->getOperand(OpOffset);
1474 if (ignoreAccess(
I, BasePtr))
1477 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1480 Value *
Mask = CI->getOperand(1 + OpOffset);
1483 Value *ExtMask =
IB.CreateZExt(Mask, ExtTy);
1484 Value *EVL =
IB.CreateAddReduce(ExtMask);
1485 Value *TrueMask = ConstantInt::get(
Mask->getType(), 1);
1486 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1490 case Intrinsic::vp_load:
1491 case Intrinsic::vp_store:
1492 case Intrinsic::experimental_vp_strided_load:
1493 case Intrinsic::experimental_vp_strided_store: {
1494 auto *VPI = cast<VPIntrinsic>(CI);
1495 unsigned IID = CI->getIntrinsicID();
1496 bool IsWrite = CI->getType()->isVoidTy();
1499 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1500 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1501 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*
DL);
1502 Value *Stride =
nullptr;
1503 if (IID == Intrinsic::experimental_vp_strided_store ||
1504 IID == Intrinsic::experimental_vp_strided_load) {
1505 Stride = VPI->getOperand(PtrOpNo + 1);
1510 if (!isa<ConstantInt>(Stride) ||
1511 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1512 Alignment =
Align(1);
1514 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1515 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1519 case Intrinsic::vp_gather:
1520 case Intrinsic::vp_scatter: {
1521 auto *VPI = cast<VPIntrinsic>(CI);
1522 unsigned IID = CI->getIntrinsicID();
1523 bool IsWrite = IID == Intrinsic::vp_scatter;
1526 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1527 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1528 MaybeAlign Alignment = VPI->getPointerAlignment();
1529 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1530 VPI->getMaskParam(),
1531 VPI->getVectorLengthParam());
1535 for (
unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1537 ignoreAccess(
I, CI->getArgOperand(ArgNo)))
1539 Type *Ty = CI->getParamByValType(ArgNo);
1547 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1554 if (
ICmpInst *Cmp = dyn_cast<ICmpInst>(
I)) {
1555 if (!Cmp->isRelational())
1569 if (BO->getOpcode() != Instruction::Sub)
1582 if (!
G->hasInitializer())
1585 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().IsDynInit)
1591void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1594 FunctionCallee F = isa<ICmpInst>(
I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1595 Value *
Param[2] = {
I->getOperand(0),
I->getOperand(1)};
1596 for (
Value *&i : Param) {
1597 if (i->getType()->isPointerTy())
1600 RTCI.createRuntimeCall(IRB,
F, Param);
1606 TypeSize TypeStoreSize,
bool IsWrite,
1607 Value *SizeArgument,
bool UseCalls,
1608 uint32_t Exp, RuntimeCallInserter &RTCI) {
1613 switch (FixedSize) {
1619 if (!Alignment || *Alignment >= Granularity ||
1620 *Alignment >= FixedSize / 8)
1621 return Pass->instrumentAddress(
I, InsertBefore,
Addr, Alignment,
1622 FixedSize, IsWrite,
nullptr, UseCalls,
1626 Pass->instrumentUnusualSizeOrAlignment(
I, InsertBefore,
Addr, TypeStoreSize,
1627 IsWrite,
nullptr, UseCalls, Exp, RTCI);
1630void AddressSanitizer::instrumentMaskedLoadOrStore(
1633 MaybeAlign Alignment,
unsigned Granularity,
Type *OpType,
bool IsWrite,
1635 RuntimeCallInserter &RTCI) {
1636 auto *VTy = cast<VectorType>(OpType);
1637 TypeSize ElemTypeSize =
DL.getTypeStoreSizeInBits(VTy->getScalarType());
1638 auto Zero = ConstantInt::get(IntptrTy, 0);
1646 Value *IsEVLZero =
IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1648 IB.SetInsertPoint(LoopInsertBefore);
1650 EVL =
IB.CreateZExtOrTrunc(EVL, IntptrTy);
1653 Value *
EC =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1654 EVL =
IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1656 EVL =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1661 Stride =
IB.CreateZExtOrTrunc(Stride, IntptrTy);
1666 if (
auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1667 if (MaskElemC->isZero())
1678 Value *InstrumentedAddress;
1679 if (isa<VectorType>(
Addr->getType())) {
1681 cast<VectorType>(
Addr->getType())->getElementType()->isPointerTy() &&
1682 "Expected vector of pointer.");
1684 }
else if (Stride) {
1691 Alignment, Granularity, ElemTypeSize, IsWrite,
1692 SizeArgument, UseCalls, Exp, RTCI);
1699 RuntimeCallInserter &RTCI) {
1720 isSafeAccess(ObjSizeVis,
Addr,
O.TypeStoreSize)) {
1721 NumOptimizedAccessesToGlobalVar++;
1729 isSafeAccess(ObjSizeVis,
Addr,
O.TypeStoreSize)) {
1730 NumOptimizedAccessesToStackVar++;
1736 NumInstrumentedWrites++;
1738 NumInstrumentedReads++;
1740 unsigned Granularity = 1 << Mapping.Scale;
1742 instrumentMaskedLoadOrStore(
this,
DL, IntptrTy,
O.MaybeMask,
O.MaybeEVL,
1743 O.MaybeStride,
O.getInsn(),
Addr,
O.Alignment,
1744 Granularity,
O.OpType,
O.IsWrite,
nullptr,
1745 UseCalls, Exp, RTCI);
1748 Granularity,
O.TypeStoreSize,
O.IsWrite,
nullptr,
1749 UseCalls, Exp, RTCI);
1755 size_t AccessSizeIndex,
1756 Value *SizeArgument,
1758 RuntimeCallInserter &RTCI) {
1764 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1765 {
Addr, SizeArgument});
1767 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1768 {
Addr, SizeArgument, ExpVal});
1771 Call = RTCI.createRuntimeCall(
1772 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex],
Addr);
1774 Call = RTCI.createRuntimeCall(
1775 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {
Addr, ExpVal});
1778 Call->setCannotMerge();
1785 size_t Granularity =
static_cast<size_t>(1) << Mapping.Scale;
1787 Value *LastAccessedByte =
1788 IRB.
CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1790 if (TypeStoreSize / 8 > 1)
1792 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1800Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1802 uint32_t TypeStoreSize,
bool IsWrite,
Value *SizeArgument) {
1806 Type *PtrTy = cast<PointerType>(
Addr->getType()->getScalarType());
1809 return InsertBefore;
1814 Value *IsSharedOrPrivate = IRB.
CreateOr(IsShared, IsPrivate);
1816 Value *AddrSpaceZeroLanding =
1818 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1819 return InsertBefore;
1835 Trm->getParent()->setName(
"asan.report");
1846void AddressSanitizer::instrumentAddress(
Instruction *OrigIns,
1849 uint32_t TypeStoreSize,
bool IsWrite,
1850 Value *SizeArgument,
bool UseCalls,
1852 RuntimeCallInserter &RTCI) {
1853 if (TargetTriple.isAMDGPU()) {
1854 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore,
Addr,
1855 TypeStoreSize, IsWrite, SizeArgument);
1864 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1867 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1874 RTCI.createRuntimeCall(
1875 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1877 RTCI.createRuntimeCall(
1878 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1879 {AddrLong, ConstantInt::get(IRB.
getInt32Ty(), Exp)});
1886 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1888 std::max<uint64_t>(Alignment.
valueOrOne().
value() >> Mapping.Scale, 1);
1893 size_t Granularity = 1ULL << Mapping.Scale;
1896 bool GenSlowPath = (
ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1898 if (TargetTriple.isAMDGCN()) {
1900 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1903 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1904 }
else if (GenSlowPath) {
1909 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1912 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1927 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1936void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1938 TypeSize TypeStoreSize,
bool IsWrite,
Value *SizeArgument,
bool UseCalls,
1939 uint32_t Exp, RuntimeCallInserter &RTCI) {
1947 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1950 RTCI.createRuntimeCall(
1951 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
1965void ModuleAddressSanitizer::poisonOneInitializer(
Function &GlobalInit) {
1971 Value *ModuleNameAddr =
1973 IRB.
CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1976 for (
auto &BB : GlobalInit)
1981void ModuleAddressSanitizer::createInitializerPoisonCalls() {
1991 if (isa<ConstantAggregateZero>(
OP))
continue;
1997 auto *Priority = cast<ConstantInt>(CS->
getOperand(0));
2001 poisonOneInitializer(*
F);
2007ModuleAddressSanitizer::getExcludedAliasedGlobal(
const GlobalAlias &GA)
const {
2012 assert(CompileKernel &&
"Only expecting to be called when compiling kernel");
2019 return dyn_cast<GlobalVariable>(
C->stripPointerCastsAndAliases());
2024bool ModuleAddressSanitizer::shouldInstrumentGlobal(
GlobalVariable *
G)
const {
2025 Type *Ty =
G->getValueType();
2028 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().NoAddress)
2030 if (!Ty->
isSized())
return false;
2031 if (!
G->hasInitializer())
return false;
2033 if (
G->getAddressSpace() &&
2040 if (
G->isThreadLocal())
return false;
2042 if (
G->getAlign() && *
G->getAlign() > getMinRedzoneSizeForGlobal())
return false;
2048 if (!TargetTriple.isOSBinFormatCOFF()) {
2049 if (!
G->hasExactDefinition() ||
G->hasComdat())
2053 if (
G->isInterposable())
2057 if (
G->hasAvailableExternallyLinkage())
2064 switch (
C->getSelectionKind()) {
2075 if (
G->hasSection()) {
2085 if (Section ==
"llvm.metadata")
return false;
2092 if (
Section.starts_with(
".preinit_array") ||
2093 Section.starts_with(
".init_array") ||
2094 Section.starts_with(
".fini_array")) {
2100 if (TargetTriple.isOSBinFormatELF()) {
2102 [](
char c) {
return llvm::isAlnum(c) || c ==
'_'; }))
2114 if (TargetTriple.isOSBinFormatCOFF() &&
Section.contains(
'$')) {
2115 LLVM_DEBUG(
dbgs() <<
"Ignoring global in sorted section (contains '$'): "
2120 if (TargetTriple.isOSBinFormatMachO()) {
2122 unsigned TAA = 0, StubSize = 0;
2125 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2130 if (ParsedSegment ==
"__OBJC" ||
2131 (ParsedSegment ==
"__DATA" && ParsedSection.
starts_with(
"__objc_"))) {
2143 if (ParsedSegment ==
"__DATA" && ParsedSection ==
"__cfstring") {
2156 if (CompileKernel) {
2159 if (
G->getName().starts_with(
"__"))
// Whether global metadata should be emitted into a dedicated Mach-O section.
// Only applies to Mach-O targets; each platform check below gates on a
// minimum OS version (the actual return-value lines are elided in this
// extraction, so the true/false outcomes cannot be confirmed from here).
2169bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection()
const {
2170 if (!TargetTriple.isOSBinFormatMachO())
2173 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2175 if (TargetTriple.isiOS() && !TargetTriple.isOSVersionLT(9))
2177 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2179 if (TargetTriple.isDriverKit())
2181 if (TargetTriple.isXROS())
2187StringRef ModuleAddressSanitizer::getGlobalMetadataSection()
const {
2188 switch (TargetTriple.getObjectFormat()) {
2198 "ModuleAddressSanitizer not implemented for object file format");
2205void ModuleAddressSanitizer::initializeCallbacks() {
2211 AsanUnpoisonGlobals =
2215 AsanRegisterGlobals =
M.getOrInsertFunction(
2217 AsanUnregisterGlobals =
M.getOrInsertFunction(
2222 AsanRegisterImageGlobals =
M.getOrInsertFunction(
2224 AsanUnregisterImageGlobals =
M.getOrInsertFunction(
2227 AsanRegisterElfGlobals =
2229 IntptrTy, IntptrTy, IntptrTy);
2230 AsanUnregisterElfGlobals =
2232 IntptrTy, IntptrTy, IntptrTy);
2237void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2242 if (!
G->hasName()) {
2246 G->setName(
genName(
"anon_global"));
2249 if (!InternalSuffix.
empty() &&
G->hasLocalLinkage()) {
2250 std::string
Name = std::string(
G->getName());
2251 Name += InternalSuffix;
2252 C =
M.getOrInsertComdat(
Name);
2254 C =
M.getOrInsertComdat(
G->getName());
2260 if (TargetTriple.isOSBinFormatCOFF()) {
2262 if (
G->hasPrivateLinkage())
2275ModuleAddressSanitizer::CreateMetadataGlobal(
Constant *Initializer,
2277 auto Linkage = TargetTriple.isOSBinFormatMachO()
2281 M, Initializer->
getType(),
false, Linkage, Initializer,
2283 Metadata->setSection(getGlobalMetadataSection());
2290Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2294 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2302void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2306 auto &
DL =
M.getDataLayout();
2309 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2310 Constant *Initializer = MetadataInitializers[i];
2314 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2320 unsigned SizeOfGlobalStruct =
DL.getTypeAllocSize(Initializer->
getType());
2322 "global metadata will not be padded appropriately");
2325 SetComdatForGlobalMetadata(
G,
Metadata,
"");
2330 if (!MetadataGlobals.empty())
2334void ModuleAddressSanitizer::instrumentGlobalsELF(
2337 const std::string &UniqueModuleId) {
2344 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2347 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2350 CreateMetadataGlobal(MetadataInitializers[i],
G->getName());
2352 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2355 if (UseComdatForGlobalsGC)
2356 SetComdatForGlobalMetadata(
G,
Metadata, UniqueModuleId);
2361 if (!MetadataGlobals.empty())
2378 "__start_" + getGlobalMetadataSection());
2382 "__stop_" + getGlobalMetadataSection());
2396 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2403void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2414 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2415 Constant *Initializer = MetadataInitializers[i];
2421 auto LivenessBinder =
2426 Twine(
"__asan_binder_") +
G->getName());
2427 Liveness->
setSection(
"__DATA,__asan_liveness,regular,live_support");
2428 LivenessGlobals[i] = Liveness;
2435 if (!LivenessGlobals.empty())
2457 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2462void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2466 unsigned N = ExtendedGlobals.
size();
2476 if (Mapping.Scale > 3)
2477 AllGlobals->setAlignment(
Align(1ULL << Mapping.Scale));
2482 ConstantInt::get(IntptrTy,
N)});
2488 IrbDtor.CreateCall(AsanUnregisterGlobals,
2490 ConstantInt::get(IntptrTy,
N)});
2499void ModuleAddressSanitizer::instrumentGlobals(
IRBuilder<> &IRB,
2504 if (CompileKernel) {
2505 for (
auto &GA :
M.aliases()) {
2507 AliasedGlobalExclusions.
insert(GV);
2512 for (
auto &
G :
M.globals()) {
2513 if (!AliasedGlobalExclusions.
count(&
G) && shouldInstrumentGlobal(&
G))
2517 size_t n = GlobalsToChange.
size();
2518 auto &
DL =
M.getDataLayout();
2532 IntptrTy, IntptrTy, IntptrTy);
2536 for (
size_t i = 0; i < n; i++) {
2540 if (
G->hasSanitizerMetadata())
2541 MD =
G->getSanitizerMetadata();
2546 std::string NameForGlobal =
G->getName().str();
2551 Type *Ty =
G->getValueType();
2552 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
2565 M, NewTy,
G->isConstant(), Linkage, NewInitializer,
"",
G,
2566 G->getThreadLocalMode(),
G->getAddressSpace());
2576 if (TargetTriple.isOSBinFormatMachO() && !
G->hasSection() &&
2578 auto Seq = dyn_cast<ConstantDataSequential>(
G->getInitializer());
2579 if (Seq && Seq->isCString())
2580 NewGlobal->
setSection(
"__TEXT,__asan_cstring,regular");
2591 G->replaceAllUsesWith(
2594 G->eraseFromParent();
2595 NewGlobals[i] = NewGlobal;
2600 bool CanUsePrivateAliases =
2601 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2602 TargetTriple.isOSBinFormatWasm();
2603 if (CanUsePrivateAliases && UsePrivateAlias) {
2606 InstrumentedGlobal =
2614 }
else if (UseOdrIndicator) {
2617 auto *ODRIndicatorSym =
2626 ODRIndicatorSym->setAlignment(
Align(1));
2627 ODRIndicator = ODRIndicatorSym;
2633 ConstantInt::get(IntptrTy, SizeInBytes),
2634 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2637 ConstantInt::get(IntptrTy, MD.
IsDynInit),
2643 Initializers[i] = Initializer;
2649 for (
size_t i = 0; i < n; i++) {
2651 if (
G->getName().empty())
continue;
2656 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2663 }
else if (n == 0) {
2666 *CtorComdat = TargetTriple.isOSBinFormatELF();
2668 *CtorComdat =
false;
2669 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2670 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2671 }
else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2672 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2674 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2680 createInitializerPoisonCalls();
// Computes the right-redzone size for a global of SizeInBytes: clamped to
// [MinRZ, kMaxRZ] and rounded so that size + redzone is a multiple of MinRZ.
2686ModuleAddressSanitizer::getRedzoneSizeForGlobal(
uint64_t SizeInBytes)
const {
// Hard upper bound on any redzone: 2^18 = 256 KiB.
2687 constexpr uint64_t kMaxRZ = 1 << 18;
2688 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
// Very small globals: pad up to exactly MinRZ total.
2691 if (SizeInBytes <= MinRZ / 2) {
2695 RZ = MinRZ - SizeInBytes;
// Otherwise scale the redzone with the global (roughly size/4, quantized
// to MinRZ units), within [MinRZ, kMaxRZ].
2698 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
// Round up so (size + redzone) is MinRZ-aligned.
2701 if (SizeInBytes % MinRZ)
2702 RZ += MinRZ - (SizeInBytes % MinRZ);
2705 assert((RZ + SizeInBytes) % MinRZ == 0);
2710int ModuleAddressSanitizer::GetAsanVersion()
const {
2711 int LongSize =
M.getDataLayout().getPointerSizeInBits();
2716 Version += (LongSize == 32 && isAndroid);
2731bool ModuleAddressSanitizer::instrumentModule() {
2732 initializeCallbacks();
2737 if (CompileKernel) {
2742 std::string AsanVersion = std::to_string(GetAsanVersion());
2743 std::string VersionCheckName =
2745 std::tie(AsanCtorFunction, std::ignore) =
2748 {}, VersionCheckName);
2752 bool CtorComdat =
true;
2755 if (AsanCtorFunction) {
2756 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2757 instrumentGlobals(IRB, &CtorComdat);
2760 instrumentGlobals(IRB, &CtorComdat);
2769 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2770 if (AsanCtorFunction) {
2774 if (AsanDtorFunction) {
2779 if (AsanCtorFunction)
2781 if (AsanDtorFunction)
2792 for (
int Exp = 0;
Exp < 2;
Exp++) {
2793 for (
size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2794 const std::string TypeStr = AccessIsWrite ?
"store" :
"load";
2795 const std::string ExpStr =
Exp ?
"exp_" :
"";
2796 const std::string EndingStr = Recover ?
"_noabort" :
"";
2805 Args1.push_back(ExpType);
2806 if (
auto AK = TLI->getExtAttrForI32Param(
false)) {
2811 AsanErrorCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2815 AsanMemoryAccessCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2820 AccessSizeIndex++) {
2821 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2822 AsanErrorCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2823 M.getOrInsertFunction(
2827 AsanMemoryAccessCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2828 M.getOrInsertFunction(
2835 const std::string MemIntrinCallbackPrefix =
2839 AsanMemmove =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memmove",
2840 PtrTy, PtrTy, PtrTy, IntptrTy);
2841 AsanMemcpy =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memcpy", PtrTy,
2842 PtrTy, PtrTy, IntptrTy);
2843 AsanMemset =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memset",
2847 AsanHandleNoReturnFunc =
2850 AsanPtrCmpFunction =
2852 AsanPtrSubFunction =
2854 if (Mapping.InGlobal)
2855 AsanShadowGlobal =
M.getOrInsertGlobal(
"__asan_shadow",
2858 AMDGPUAddressShared =
2860 AMDGPUAddressPrivate =
2864bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(
Function &
F) {
2872 if (
F.getName().contains(
" load]")) {
2882bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(
Function &
F) {
2888 if (Mapping.InGlobal) {
2896 LocalDynamicShadow =
2897 IRB.
CreateCall(Asm, {AsanShadowGlobal},
".asan.shadow");
2899 LocalDynamicShadow =
2903 Value *GlobalDynamicAddress =
F.getParent()->getOrInsertGlobal(
2905 LocalDynamicShadow = IRB.
CreateLoad(IntptrTy, GlobalDynamicAddress);
2910void AddressSanitizer::markEscapedLocalAllocas(
Function &
F) {
2915 assert(ProcessedAllocas.empty() &&
"must process localescape before allocas");
2919 if (!
F.getParent()->getFunction(
"llvm.localescape"))
return;
2925 if (
II &&
II->getIntrinsicID() == Intrinsic::localescape) {
2927 for (
Value *Arg :
II->args()) {
2928 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2930 "non-static alloca arg to localescape");
2931 ProcessedAllocas[AI] =
false;
2938bool AddressSanitizer::suppressInstrumentationSiteForDebug(
int &Instrumented) {
2939 bool ShouldInstrument =
2943 return !ShouldInstrument;
2946bool AddressSanitizer::instrumentFunction(
Function &
F,
2952 if (
F.getName().starts_with(
"__asan_"))
return false;
2953 if (
F.isPresplitCoroutine())
2956 bool FunctionModified =
false;
2959 if (
F.hasFnAttribute(Attribute::Naked))
2960 return FunctionModified;
2965 if (maybeInsertAsanInitAtFunctionEntry(
F))
2966 FunctionModified =
true;
2969 if (!
F.hasFnAttribute(Attribute::SanitizeAddress))
return FunctionModified;
2971 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
2972 return FunctionModified;
2976 initializeCallbacks(TLI);
2978 FunctionStateRAII CleanupObj(
this);
2980 RuntimeCallInserter RTCI(
F);
2982 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(
F);
2986 markEscapedLocalAllocas(
F);
2998 for (
auto &BB :
F) {
3000 TempsToInstrument.
clear();
3001 int NumInsnsPerBB = 0;
3002 for (
auto &Inst : BB) {
3003 if (LooksLikeCodeInBug11395(&Inst))
return false;
3010 if (!InterestingOperands.
empty()) {
3011 for (
auto &Operand : InterestingOperands) {
3017 if (Operand.MaybeMask) {
3021 if (!TempsToInstrument.
insert(
Ptr).second)
3025 OperandsToInstrument.
push_back(Operand);
3032 PointerComparisonsOrSubtracts.
push_back(&Inst);
3038 if (
auto *CB = dyn_cast<CallBase>(&Inst)) {
3040 TempsToInstrument.
clear();
3044 if (
CallInst *CI = dyn_cast<CallInst>(&Inst))
3051 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3052 OperandsToInstrument.
size() + IntrinToInstrument.
size() >
3053 (
unsigned)InstrumentationWithCallsThreshold);
3058 int NumInstrumented = 0;
3059 for (
auto &Operand : OperandsToInstrument) {
3060 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3061 instrumentMop(ObjSizeVis, Operand, UseCalls,
3062 F.getDataLayout(), RTCI);
3063 FunctionModified =
true;
3065 for (
auto *Inst : IntrinToInstrument) {
3066 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3067 instrumentMemIntrinsic(Inst, RTCI);
3068 FunctionModified =
true;
3071 FunctionStackPoisoner FSP(
F, *
this, RTCI);
3072 bool ChangedStack = FSP.runOnFunction();
3076 for (
auto *CI : NoReturnCalls) {
3078 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3081 for (
auto *Inst : PointerComparisonsOrSubtracts) {
3082 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3083 FunctionModified =
true;
3086 if (ChangedStack || !NoReturnCalls.empty())
3087 FunctionModified =
true;
3089 LLVM_DEBUG(
dbgs() <<
"ASAN done instrumenting: " << FunctionModified <<
" "
3092 return FunctionModified;
3098bool AddressSanitizer::LooksLikeCodeInBug11395(
Instruction *
I) {
3099 if (LongSize != 32)
return false;
3108void FunctionStackPoisoner::initializeCallbacks(
Module &M) {
3112 const char *MallocNameTemplate =
3117 std::string Suffix = itostr(Index);
3118 AsanStackMallocFunc[
Index] =
M.getOrInsertFunction(
3119 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3120 AsanStackFreeFunc[
Index] =
3125 if (ASan.UseAfterScope) {
3126 AsanPoisonStackMemoryFunc =
M.getOrInsertFunction(
3128 AsanUnpoisonStackMemoryFunc =
M.getOrInsertFunction(
3132 for (
size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3133 0xf3, 0xf5, 0xf8}) {
3134 std::ostringstream
Name;
3136 Name << std::setw(2) << std::setfill(
'0') << std::hex << Val;
3137 AsanSetShadowFunc[Val] =
3138 M.getOrInsertFunction(
Name.str(), IRB.
getVoidTy(), IntptrTy, IntptrTy);
3141 AsanAllocaPoisonFunc =
M.getOrInsertFunction(
3143 AsanAllocasUnpoisonFunc =
M.getOrInsertFunction(
3149 size_t Begin,
size_t End,
3151 Value *ShadowBase) {
3155 const size_t LargestStoreSizeInBytes =
3156 std::min<size_t>(
sizeof(
uint64_t), ASan.LongSize / 8);
3158 const bool IsLittleEndian =
F.getDataLayout().isLittleEndian();
3164 for (
size_t i = Begin; i <
End;) {
3165 if (!ShadowMask[i]) {
3171 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3173 while (StoreSizeInBytes >
End - i)
3174 StoreSizeInBytes /= 2;
3177 for (
size_t j = StoreSizeInBytes - 1;
j && !ShadowMask[i +
j]; --
j) {
3178 while (j <= StoreSizeInBytes / 2)
3179 StoreSizeInBytes /= 2;
3183 for (
size_t j = 0;
j < StoreSizeInBytes;
j++) {
3185 Val |= (
uint64_t)ShadowBytes[i + j] << (8 * j);
3187 Val = (Val << 8) | ShadowBytes[i + j];
3196 i += StoreSizeInBytes;
3203 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.
size(), IRB, ShadowBase);
3208 size_t Begin,
size_t End,
3211 size_t Done = Begin;
3212 for (
size_t i = Begin, j = Begin + 1; i <
End; i =
j++) {
3213 if (!ShadowMask[i]) {
3218 if (!AsanSetShadowFunc[Val])
3222 for (;
j <
End && ShadowMask[
j] && Val == ShadowBytes[
j]; ++
j) {
3225 if (j - i >= ASan.MaxInlinePoisoningSize) {
3226 copyToShadowInline(ShadowMask, ShadowBytes,
Done, i, IRB, ShadowBase);
3227 RTCI.createRuntimeCall(
3228 IRB, AsanSetShadowFunc[Val],
3229 {IRB.
CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3230 ConstantInt::get(IntptrTy, j - i)});
3235 copyToShadowInline(ShadowMask, ShadowBytes,
Done,
End, IRB, ShadowBase);
3243 for (
int i = 0;; i++, MaxSize *= 2)
3244 if (LocalStackSize <= MaxSize)
return i;
3248void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3250 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3258 if (Arg.hasByValAttr()) {
3259 Type *Ty = Arg.getParamByValType();
3260 const Align Alignment =
3261 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3265 (Arg.hasName() ? Arg.getName() :
"Arg" +
Twine(Arg.getArgNo())) +
3268 Arg.replaceAllUsesWith(AI);
3270 uint64_t AllocSize =
DL.getTypeAllocSize(Ty);
3271 IRB.
CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3279 Value *ValueIfFalse) {
3282 PHI->addIncoming(ValueIfFalse, CondBlock);
3284 PHI->addIncoming(ValueIfTrue, ThenBlock);
3288Value *FunctionStackPoisoner::createAllocaForLayout(
3297 nullptr,
"MyAlloca");
3306void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3309 DynamicAllocaLayout = IRB.
CreateAlloca(IntptrTy,
nullptr);
3314void FunctionStackPoisoner::processDynamicAllocas() {
3321 for (
const auto &APC : DynamicAllocaPoisonCallVec) {
3324 assert(ASan.isInterestingAlloca(*APC.AI));
3325 assert(!APC.AI->isStaticAlloca());
3328 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3335 createDynamicAllocasInitStorage();
3336 for (
auto &AI : DynamicAllocaVec)
3337 handleDynamicAllocaCall(AI);
3338 unpoisonDynamicAllocas();
3350 for (
Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
3360 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3362 if (
auto *Store = dyn_cast<StoreInst>(It)) {
3366 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3367 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3370 Value *Val = Store->getValueOperand();
3371 bool IsDirectArgInit = isa<Argument>(Val);
3372 bool IsArgInitViaCast =
3373 isa<CastInst>(Val) &&
3374 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3377 Val == It->getPrevNonDebugInstruction();
3378 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3382 if (IsArgInitViaCast)
3383 InitInsts.
push_back(cast<Instruction>(Val));
3394void FunctionStackPoisoner::processStaticAllocas() {
3395 if (AllocaVec.
empty()) {
3400 int StackMallocIdx = -1;
3402 if (
auto SP =
F.getSubprogram())
3403 EntryDebugLocation =
3412 auto InsBeforeB = InsBefore->
getParent();
3413 assert(InsBeforeB == &
F.getEntryBlock());
3414 for (
auto *AI : StaticAllocasToMoveUp)
3425 ArgInitInst->moveBefore(InsBefore);
3428 if (LocalEscapeCall) LocalEscapeCall->
moveBefore(InsBefore);
3434 ASan.getAllocaSizeInBytes(*AI),
3445 uint64_t Granularity = 1ULL << Mapping.Scale;
3446 uint64_t MinHeaderSize = std::max((
uint64_t)ASan.LongSize / 2, Granularity);
3452 for (
auto &
Desc : SVD)
3456 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3459 assert(ASan.isInterestingAlloca(*APC.AI));
3460 assert(APC.AI->isStaticAlloca());
3465 if (
const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3466 if (LifetimeLoc->getFile() == FnLoc->getFile())
3467 if (
unsigned Line = LifetimeLoc->getLine())
3468 Desc.Line = std::min(
Desc.Line ?
Desc.Line : Line, Line);
3474 LLVM_DEBUG(
dbgs() << DescriptionString <<
" --- " <<
L.FrameSize <<
"\n");
3476 bool DoStackMalloc =
3486 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3487 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3489 Value *StaticAlloca =
3490 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L,
false);
3493 Value *LocalStackBase;
3494 Value *LocalStackBaseAlloca;
3497 if (DoStackMalloc) {
3498 LocalStackBaseAlloca =
3499 IRB.
CreateAlloca(IntptrTy,
nullptr,
"asan_local_stack_base");
3506 Constant *OptionDetectUseAfterReturn =
F.getParent()->getOrInsertGlobal(
3516 Value *FakeStackValue =
3517 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3518 ConstantInt::get(IntptrTy, LocalStackSize));
3520 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3521 ConstantInt::get(IntptrTy, 0));
3529 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3530 ConstantInt::get(IntptrTy, LocalStackSize));
3532 Value *NoFakeStack =
3537 Value *AllocaValue =
3538 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L,
true) : StaticAlloca;
3541 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3542 IRB.
CreateStore(LocalStackBase, LocalStackBaseAlloca);
3547 FakeStack = ConstantInt::get(IntptrTy, 0);
3549 DoDynamicAlloca ? createAllocaForLayout(IRB, L,
true) : StaticAlloca;
3550 LocalStackBaseAlloca = LocalStackBase;
3556 Value *LocalStackBaseAllocaPtr =
3557 isa<PtrToIntInst>(LocalStackBaseAlloca)
3558 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3559 : LocalStackBaseAlloca;
3560 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3561 "Variable descriptions relative to ASan stack base will be dropped");
3564 for (
const auto &
Desc : SVD) {
3569 IRB.
CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
Desc.Offset)),
3582 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3592 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3599 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3602 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3604 if (!StaticAllocaPoisonCallVec.empty()) {
3608 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3611 size_t Begin =
Desc.Offset /
L.Granularity;
3612 size_t End = Begin + (APC.Size +
L.Granularity - 1) /
L.Granularity;
3615 copyToShadow(ShadowAfterScope,
3616 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin,
End,
3630 if (DoStackMalloc) {
3631 assert(StackMallocIdx >= 0);
3648 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3650 ShadowAfterReturn.
resize(ClassSize /
L.Granularity,
3652 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3654 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3656 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3657 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3658 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3659 IRBPoison.CreateStore(
3661 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3664 RTCI.createRuntimeCall(
3665 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3666 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3670 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3672 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3677 for (
auto *AI : AllocaVec)
3685 Value *SizeArg = ConstantInt::get(IntptrTy,
Size);
3686 RTCI.createRuntimeCall(
3687 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3688 {AddrArg, SizeArg});
3699void FunctionStackPoisoner::handleDynamicAllocaCall(
AllocaInst *AI) {
3707 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3713 const unsigned ElementSize =
3717 ConstantInt::get(IntptrTy, ElementSize));
3745 ConstantInt::get(IntptrTy, Alignment.
value()));
3748 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
static const uint64_t kEmscriptenShadowOffset
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This defines the Use class.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
@ SameSize
The data referenced by the COMDAT must be the same size.
@ Any
The linker may choose any COMDAT.
@ NoDeduplicate
No deduplication is performed.
@ ExactMatch
The data referenced by the COMDAT must be the same.
ConstantArray - Constant Array Declarations.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
DILocation * get() const
Get the underlying DILocation.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const Constant * getAliasee() const
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
void setComdat(Comdat *C)
void setSection(StringRef S)
Change the section for this global.
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ CommonLinkage
Tentative definitions.
@ InternalLinkage
Rename collisions when linking (static functions).
@ AvailableExternallyLinkage
Available for inspection, not emission.
@ ExternalWeakLinkage
ExternalWeak linkage description.
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Base class for instruction visitors.
RetTy visitCallBase(CallBase &I)
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
RetTy visitIntrinsicInst(IntrinsicInst &I)
void visit(Iterator Start, Iterator End)
RetTy visitReturnInst(ReturnInst &I)
RetTy visitAllocaInst(AllocaInst &I)
RetTy visitResumeInst(ResumeInst &I)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This is the common base class for memset/memcpy/memmove.
A Module instance is used to store all the information related to an LLVM module.
Evaluate the size and offset of an object pointed to by a Value* statically.
SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Pass interface - Implemented by all 'passes'.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Triple - Helper class for working with autoconf configuration names.
bool isAndroidVersionLT(unsigned Major) const
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
bool isDriverKit() const
Is this an Apple DriverKit triple.
bool isAndroid() const
Tests whether the target is Android.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isOSWindows() const
Tests whether the OS is Windows.
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isOSLinux() const
Tests whether the OS is Linux.
bool isMacOSX() const
Is this a Mac OS X triple.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
bool isWatchOS() const
Is this an Apple watchOS triple.
bool isiOS() const
Is this an iOS triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt32Ty(LLVMContext &C)
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Linkage
Describes symbol linkage. This can be used to resolve definition clashes.
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
This is an optimization pass for GlobalISel generic memory operations.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, Instruction *InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
@ Dynamic
Denotes mode unknown at compile time.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AsanCtorKind
Types of ASan module constructors supported.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
int InstrumentationWithCallsThreshold
uint32_t MaxInlinePoisoningSize
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.