184#include "llvm/IR/IntrinsicsAArch64.h"
185#include "llvm/IR/IntrinsicsX86.h"
215#define DEBUG_TYPE "msan"
218 "Controls which checks to insert");
221 "Controls which instruction to instrument");
239 "msan-track-origins",
244 cl::desc(
"keep going after reporting a UMR"),
253 "msan-poison-stack-with-call",
258 "msan-poison-stack-pattern",
259 cl::desc(
"poison uninitialized stack variables with the given pattern"),
264 cl::desc(
"Print name of local stack variable"),
273 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
278 cl::desc(
"exact handling of relational integer ICmp"),
282 "msan-handle-lifetime-intrinsics",
284 "when possible, poison scoped variables at the beginning of the scope "
285 "(slower, but more precise)"),
296 "msan-handle-asm-conservative",
307 "msan-check-access-address",
308 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
313 cl::desc(
"check arguments and return values at function call boundaries"),
317 "msan-dump-strict-instructions",
318 cl::desc(
"print out instructions with default strict semantics"),
322 "msan-instrumentation-with-call-threshold",
324 "If the function being instrumented requires more than "
325 "this number of checks and origin stores, use callbacks instead of "
326 "inline checks (-1 means never use callbacks)."),
331 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
341 cl::desc(
"Insert checks for constant shadow values"),
348 cl::desc(
"Place MSan constructors in comdat sections"),
354 cl::desc(
"Define custom MSan AndMask"),
358 cl::desc(
"Define custom MSan XorMask"),
362 cl::desc(
"Define custom MSan ShadowBase"),
366 cl::desc(
"Define custom MSan OriginBase"),
371 cl::desc(
"Define threshold for number of checks per "
372 "debug location to force origin update."),
384struct MemoryMapParams {
391struct PlatformMemoryMapParams {
392 const MemoryMapParams *bits32;
393 const MemoryMapParams *bits64;
555class MemorySanitizer {
564 MemorySanitizer(MemorySanitizer &&) =
delete;
565 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
566 MemorySanitizer(
const MemorySanitizer &) =
delete;
567 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
572 friend struct MemorySanitizerVisitor;
573 friend struct VarArgHelperBase;
574 friend struct VarArgAMD64Helper;
575 friend struct VarArgAArch64Helper;
576 friend struct VarArgPowerPCHelper;
577 friend struct VarArgSystemZHelper;
578 friend struct VarArgI386Helper;
579 friend struct VarArgGenericHelper;
581 void initializeModule(
Module &M);
586 template <
typename... ArgsTy>
613 Value *ParamOriginTLS;
619 Value *RetvalOriginTLS;
625 Value *VAArgOriginTLS;
628 Value *VAArgOverflowSizeTLS;
631 bool CallbacksInitialized =
false;
676 Value *MsanMetadataAlloca;
682 const MemoryMapParams *MapParams;
686 MemoryMapParams CustomMapParams;
691 MDNode *OriginStoreWeights;
694void insertModuleCtor(
Module &M) {
722 Recover(getOptOrDefault(
ClKeepGoing, Kernel || R)),
740 MemorySanitizer Msan(*
F.getParent(),
Options);
759 OS, MapClassName2PassName);
766 OS <<
"eager-checks;";
767 OS <<
"track-origins=" <<
Options.TrackOrigins;
783template <
typename... ArgsTy>
790 std::forward<ArgsTy>(Args)...);
793 return M.getOrInsertFunction(
Name, MsanMetadata,
794 std::forward<ArgsTy>(Args)...);
803 RetvalOriginTLS =
nullptr;
805 ParamOriginTLS =
nullptr;
807 VAArgOriginTLS =
nullptr;
808 VAArgOverflowSizeTLS =
nullptr;
810 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
812 IRB.getVoidTy(), IRB.getInt32Ty());
823 MsanGetContextStateFn =
824 M.getOrInsertFunction(
"__msan_get_context_state", PtrTy);
828 for (
int ind = 0, size = 1; ind < 4; ind++,
size <<= 1) {
829 std::string name_load =
830 "__msan_metadata_ptr_for_load_" + std::to_string(size);
831 std::string name_store =
832 "__msan_metadata_ptr_for_store_" + std::to_string(size);
833 MsanMetadataPtrForLoad_1_8[ind] =
834 getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
835 MsanMetadataPtrForStore_1_8[ind] =
836 getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
839 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
840 M,
"__msan_metadata_ptr_for_load_n", PtrTy, IRB.getInt64Ty());
841 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
842 M,
"__msan_metadata_ptr_for_store_n", PtrTy, IRB.getInt64Ty());
845 MsanPoisonAllocaFn =
M.getOrInsertFunction(
846 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
847 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
848 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
852 return M.getOrInsertGlobal(
Name, Ty, [&] {
854 nullptr,
Name,
nullptr,
860void MemorySanitizer::createUserspaceApi(
Module &M,
868 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
869 :
"__msan_warning_with_origin_noreturn";
870 WarningFn =
M.getOrInsertFunction(WarningFnName,
872 IRB.getVoidTy(), IRB.getInt32Ty());
875 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
876 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
902 VAArgOverflowSizeTLS =
907 unsigned AccessSize = 1 << AccessSizeIndex;
908 std::string FunctionName =
"__msan_maybe_warning_" + itostr(AccessSize);
909 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
911 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
913 FunctionName =
"__msan_maybe_store_origin_" + itostr(AccessSize);
914 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
916 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
920 MsanSetAllocaOriginWithDescriptionFn =
921 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
922 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
923 MsanSetAllocaOriginNoDescriptionFn =
924 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
925 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
926 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
927 IRB.getVoidTy(), PtrTy, IntptrTy);
931void MemorySanitizer::initializeCallbacks(
Module &M,
934 if (CallbacksInitialized)
940 MsanChainOriginFn =
M.getOrInsertFunction(
941 "__msan_chain_origin",
944 MsanSetOriginFn =
M.getOrInsertFunction(
946 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
948 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
950 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
951 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
953 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
955 MsanInstrumentAsmStoreFn =
M.getOrInsertFunction(
956 "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
959 createKernelApi(M, TLI);
961 createUserspaceApi(M, TLI);
963 CallbacksInitialized =
true;
969 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
987void MemorySanitizer::initializeModule(
Module &M) {
988 auto &
DL =
M.getDataLayout();
990 TargetTriple =
Triple(
M.getTargetTriple());
992 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
993 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
995 if (ShadowPassed || OriginPassed) {
1000 MapParams = &CustomMapParams;
1002 switch (TargetTriple.getOS()) {
1004 switch (TargetTriple.getArch()) {
1019 switch (TargetTriple.getArch()) {
1028 switch (TargetTriple.getArch()) {
1062 C = &(
M.getContext());
1064 IntptrTy = IRB.getIntPtrTy(
DL);
1065 OriginTy = IRB.getInt32Ty();
1066 PtrTy = IRB.getPtrTy();
1071 if (!CompileKernel) {
1073 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1074 return new GlobalVariable(
1075 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1076 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1080 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1081 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1082 GlobalValue::WeakODRLinkage,
1083 IRB.getInt32(Recover),
"__msan_keep_going");
1098struct VarArgHelper {
1099 virtual ~VarArgHelper() =
default;
1114 virtual void finalizeInstrumentation() = 0;
1117struct MemorySanitizerVisitor;
1122 MemorySanitizerVisitor &Visitor);
1129 if (TypeSizeFixed <= 8)
1138class NextNodeIRBuilder :
public IRBuilder<> {
1151struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1153 MemorySanitizer &MS;
1156 std::unique_ptr<VarArgHelper> VAHelper;
1164 bool PropagateShadow;
1168 struct ShadowOriginAndInsertPoint {
1174 : Shadow(S), Origin(
O), OrigIns(
I) {}
1182 int64_t SplittableBlocksCount = 0;
1184 MemorySanitizerVisitor(
Function &
F, MemorySanitizer &MS,
1187 bool SanitizeFunction =
1189 InsertChecks = SanitizeFunction;
1190 PropagateShadow = SanitizeFunction;
1200 MS.initializeCallbacks(*
F.getParent(), TLI);
1201 FnPrologueEnd =
IRBuilder<>(
F.getEntryBlock().getFirstNonPHI())
1204 if (MS.CompileKernel) {
1206 insertKmsanPrologue(IRB);
1210 <<
"MemorySanitizer is not inserting checks into '"
1211 <<
F.getName() <<
"'\n");
1214 bool instrumentWithCalls(
Value *V) {
1216 if (isa<Constant>(V))
1219 ++SplittableBlocksCount;
1225 return I.getParent() == FnPrologueEnd->
getParent() &&
1226 (&
I == FnPrologueEnd ||
I.comesBefore(FnPrologueEnd));
1234 if (MS.TrackOrigins <= 1)
1236 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1241 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1253 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1254 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1266 auto [InsertPt,
Index] =
1278 Align CurrentAlignment = Alignment;
1279 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1280 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1282 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1287 CurrentAlignment = IntptrAlignment;
1305 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1306 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1314 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1323 if (instrumentWithCalls(ConvertedShadow) &&
1326 Value *ConvertedShadow2 =
1332 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1336 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1341 void materializeStores() {
1344 Value *Val =
SI->getValueOperand();
1346 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1347 Value *ShadowPtr, *OriginPtr;
1349 const Align Alignment =
SI->getAlign();
1351 std::tie(ShadowPtr, OriginPtr) =
1352 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
true);
1361 if (MS.TrackOrigins && !
SI->isAtomic())
1362 storeOrigin(IRB,
Addr, Shadow, getOrigin(Val), OriginPtr,
1369 if (MS.TrackOrigins < 2)
1372 if (LazyWarningDebugLocationCount.
empty())
1373 for (
const auto &
I : InstrumentationList)
1374 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1388 if (
Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1390 auto NewDebugLoc = OI->getDebugLoc();
1397 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1398 Origin = updateOrigin(Origin, IRBOrigin);
1403 if (MS.CompileKernel || MS.TrackOrigins)
1417 if (instrumentWithCalls(ConvertedShadow) &&
1421 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1422 Value *ConvertedShadow2 =
1425 Fn, {ConvertedShadow2,
1426 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1430 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1433 !MS.Recover, MS.ColdCallWeights);
1436 insertWarningFn(IRB, Origin);
1441 void materializeInstructionChecks(
1446 bool Combine = !MS.TrackOrigins;
1448 Value *Shadow =
nullptr;
1449 for (
const auto &ShadowData : InstructionChecks) {
1453 Value *ConvertedShadow = ShadowData.Shadow;
1455 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1462 insertWarningFn(IRB, ShadowData.Origin);
1472 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1477 Shadow = ConvertedShadow;
1481 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1482 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1483 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1489 materializeOneCheck(IRB, Shadow,
nullptr);
1493 void materializeChecks() {
1499 for (
auto I = InstrumentationList.begin();
1500 I != InstrumentationList.end();) {
1501 auto OrigIns =
I->OrigIns;
1505 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1506 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1507 return OrigIns != R.OrigIns;
1521 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1522 {Zero, IRB.getInt32(0)},
"param_shadow");
1523 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1524 {Zero, IRB.getInt32(1)},
"retval_shadow");
1525 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1526 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1527 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1528 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1529 MS.VAArgOverflowSizeTLS =
1530 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1531 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1532 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1533 {Zero, IRB.getInt32(5)},
"param_origin");
1534 MS.RetvalOriginTLS =
1535 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1536 {Zero, IRB.getInt32(6)},
"retval_origin");
1538 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1555 for (
PHINode *PN : ShadowPHINodes) {
1556 PHINode *PNS = cast<PHINode>(getShadow(PN));
1557 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1558 size_t NumValues = PN->getNumIncomingValues();
1559 for (
size_t v = 0;
v < NumValues;
v++) {
1560 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1562 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1566 VAHelper->finalizeInstrumentation();
1570 if (InstrumentLifetimeStart) {
1571 for (
auto Item : LifetimeStartList) {
1572 instrumentAlloca(*Item.second, Item.first);
1573 AllocaSet.
remove(Item.second);
1579 instrumentAlloca(*AI);
1582 materializeChecks();
1586 materializeStores();
1592 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1604 if (
VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1605 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1607 VT->getElementCount());
1609 if (
ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1610 return ArrayType::get(getShadowTy(AT->getElementType()),
1611 AT->getNumElements());
1613 if (
StructType *ST = dyn_cast<StructType>(OrigTy)) {
1615 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1616 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1618 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1634 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1636 if (Aggregator != FalseVal)
1637 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1639 Aggregator = ShadowBool;
1648 if (!
Array->getNumElements())
1652 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1656 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1657 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1667 return collapseStructShadow(
Struct, V, IRB);
1668 if (
ArrayType *Array = dyn_cast<ArrayType>(
V->getType()))
1669 return collapseArrayShadow(Array, V, IRB);
1670 if (isa<VectorType>(
V->getType())) {
1671 if (isa<ScalableVectorType>(
V->getType()))
1674 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1682 Type *VTy =
V->getType();
1684 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1691 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1692 if (
VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1693 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1694 VectTy->getElementCount());
1700 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1701 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1702 return VectorType::get(
1703 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1704 VectTy->getElementCount());
1706 assert(IntPtrTy == MS.IntptrTy);
1711 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1713 VectTy->getElementCount(),
1714 constToIntPtr(VectTy->getElementType(),
C));
1716 assert(IntPtrTy == MS.IntptrTy);
1717 return ConstantInt::get(MS.IntptrTy,
C);
1728 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1731 if (
uint64_t AndMask = MS.MapParams->AndMask)
1732 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1734 if (
uint64_t XorMask = MS.MapParams->XorMask)
1735 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1747 std::pair<Value *, Value *>
1754 assert(VectTy->getElementType()->isPointerTy());
1756 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1757 Value *ShadowOffset = getShadowPtrOffset(
Addr, IRB);
1758 Value *ShadowLong = ShadowOffset;
1759 if (
uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1761 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1764 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1766 Value *OriginPtr =
nullptr;
1767 if (MS.TrackOrigins) {
1768 Value *OriginLong = ShadowOffset;
1769 uint64_t OriginBase = MS.MapParams->OriginBase;
1770 if (OriginBase != 0)
1772 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1775 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1778 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1780 return std::make_pair(ShadowPtr, OriginPtr);
1783 template <
typename... ArgsTy>
1788 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1789 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1792 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1795 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *
Addr,
1799 Value *ShadowOriginPtrs;
1806 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1808 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1809 ShadowOriginPtrs = createMetadataCall(
1811 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1818 return std::make_pair(ShadowPtr, OriginPtr);
1824 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *
Addr,
1831 return getShadowOriginPtrKernelNoVec(
Addr, IRB, ShadowTy,
isStore);
1835 unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1836 Value *ShadowPtrs = ConstantInt::getNullValue(
1838 Value *OriginPtrs =
nullptr;
1839 if (MS.TrackOrigins)
1840 OriginPtrs = ConstantInt::getNullValue(
1842 for (
unsigned i = 0; i < NumElements; ++i) {
1845 auto [ShadowPtr, OriginPtr] =
1846 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1849 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1850 if (MS.TrackOrigins)
1852 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1854 return {ShadowPtrs, OriginPtrs};
1861 if (MS.CompileKernel)
1862 return getShadowOriginPtrKernel(
Addr, IRB, ShadowTy,
isStore);
1863 return getShadowOriginPtrUserspace(
Addr, IRB, ShadowTy, Alignment);
1878 if (!MS.TrackOrigins)
1892 Value *getOriginPtrForRetval() {
1894 return MS.RetvalOriginTLS;
1899 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1900 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1905 if (!MS.TrackOrigins)
1907 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1908 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1909 OriginMap[
V] = Origin;
1913 Type *ShadowTy = getShadowTy(OrigTy);
1923 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
1928 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1930 if (
ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1932 getPoisonedShadow(AT->getElementType()));
1935 if (
StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1937 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1938 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1946 Type *ShadowTy = getShadowTy(V);
1949 return getPoisonedShadow(ShadowTy);
1961 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
1962 return getCleanShadow(V);
1964 Value *Shadow = ShadowMap[
V];
1966 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
1968 assert(Shadow &&
"No shadow for a value");
1972 if (
UndefValue *U = dyn_cast<UndefValue>(V)) {
1973 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1974 : getCleanShadow(V);
1979 if (
Argument *
A = dyn_cast<Argument>(V)) {
1981 Value *&ShadowPtr = ShadowMap[
V];
1986 unsigned ArgOffset = 0;
1988 for (
auto &FArg :
F->args()) {
1989 if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
1991 ?
"vscale not fully supported\n"
1992 :
"Arg is not sized\n"));
1994 ShadowPtr = getCleanShadow(V);
1995 setOrigin(
A, getCleanOrigin());
2001 unsigned Size = FArg.hasByValAttr()
2002 ?
DL.getTypeAllocSize(FArg.getParamByValType())
2003 :
DL.getTypeAllocSize(FArg.getType());
2007 if (FArg.hasByValAttr()) {
2011 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
2012 FArg.getParamAlign(), FArg.getParamByValType());
2013 Value *CpShadowPtr, *CpOriginPtr;
2014 std::tie(CpShadowPtr, CpOriginPtr) =
2015 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2017 if (!PropagateShadow || Overflow) {
2019 EntryIRB.CreateMemSet(
2023 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2025 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign,
Base,
2030 if (MS.TrackOrigins) {
2031 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2035 EntryIRB.CreateMemCpy(
2044 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2045 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2046 ShadowPtr = getCleanShadow(V);
2047 setOrigin(
A, getCleanOrigin());
2050 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2051 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2053 if (MS.TrackOrigins) {
2054 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2055 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2059 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2065 assert(ShadowPtr &&
"Could not find shadow for an argument");
2069 return getCleanShadow(V);
2074 return getShadow(
I->getOperand(i));
2079 if (!MS.TrackOrigins)
2081 if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2082 return getCleanOrigin();
2083 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2084 "Unexpected value type in getOrigin()");
2086 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2087 return getCleanOrigin();
2089 Value *Origin = OriginMap[
V];
2090 assert(Origin &&
"Missing origin");
2096 return getOrigin(
I->getOperand(i));
2109 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2110 << *OrigIns <<
"\n");
2115 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2116 isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2117 "Can only insert checks for integer, vector, and aggregate shadow "
2120 InstrumentationList.push_back(
2121 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2130 Value *Shadow, *Origin;
2132 Shadow = getShadow(Val);
2135 Origin = getOrigin(Val);
2137 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2140 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2142 insertShadowCheck(Shadow, Origin, OrigIns);
2147 case AtomicOrdering::NotAtomic:
2148 return AtomicOrdering::NotAtomic;
2149 case AtomicOrdering::Unordered:
2150 case AtomicOrdering::Monotonic:
2151 case AtomicOrdering::Release:
2152 return AtomicOrdering::Release;
2153 case AtomicOrdering::Acquire:
2154 case AtomicOrdering::AcquireRelease:
2155 return AtomicOrdering::AcquireRelease;
2156 case AtomicOrdering::SequentiallyConsistent:
2157 return AtomicOrdering::SequentiallyConsistent;
2163 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2164 uint32_t OrderingTable[NumOrderings] = {};
2166 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2167 OrderingTable[(
int)AtomicOrderingCABI::release] =
2168 (int)AtomicOrderingCABI::release;
2169 OrderingTable[(int)AtomicOrderingCABI::consume] =
2170 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2171 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2172 (
int)AtomicOrderingCABI::acq_rel;
2173 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2174 (
int)AtomicOrderingCABI::seq_cst;
2181 case AtomicOrdering::NotAtomic:
2182 return AtomicOrdering::NotAtomic;
2183 case AtomicOrdering::Unordered:
2184 case AtomicOrdering::Monotonic:
2185 case AtomicOrdering::Acquire:
2186 return AtomicOrdering::Acquire;
2187 case AtomicOrdering::Release:
2188 case AtomicOrdering::AcquireRelease:
2189 return AtomicOrdering::AcquireRelease;
2190 case AtomicOrdering::SequentiallyConsistent:
2191 return AtomicOrdering::SequentiallyConsistent;
2197 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2198 uint32_t OrderingTable[NumOrderings] = {};
2200 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2201 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2202 OrderingTable[(int)AtomicOrderingCABI::consume] =
2203 (
int)AtomicOrderingCABI::acquire;
2204 OrderingTable[(int)AtomicOrderingCABI::release] =
2205 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2206 (int)AtomicOrderingCABI::acq_rel;
2207 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2208 (
int)AtomicOrderingCABI::seq_cst;
2216 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2219 if (isInPrologue(
I))
2224 setShadow(&
I, getCleanShadow(&
I));
2225 setOrigin(&
I, getCleanOrigin());
2237 assert(
I.getType()->isSized() &&
"Load type must have size");
2238 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2239 NextNodeIRBuilder IRB(&
I);
2240 Type *ShadowTy = getShadowTy(&
I);
2242 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2243 const Align Alignment =
I.getAlign();
2244 if (PropagateShadow) {
2245 std::tie(ShadowPtr, OriginPtr) =
2246 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2250 setShadow(&
I, getCleanShadow(&
I));
2254 insertShadowCheck(
I.getPointerOperand(), &
I);
2259 if (MS.TrackOrigins) {
2260 if (PropagateShadow) {
2265 setOrigin(&
I, getCleanOrigin());
2275 StoreList.push_back(&
I);
2277 insertShadowCheck(
I.getPointerOperand(), &
I);
2281 assert(isa<AtomicRMWInst>(
I) || isa<AtomicCmpXchgInst>(
I));
2285 Value *Val =
I.getOperand(1);
2286 Value *ShadowPtr = getShadowOriginPtr(
Addr, IRB, getShadowTy(Val),
Align(1),
2291 insertShadowCheck(
Addr, &
I);
2296 if (isa<AtomicCmpXchgInst>(
I))
2297 insertShadowCheck(Val, &
I);
2301 setShadow(&
I, getCleanShadow(&
I));
2302 setOrigin(&
I, getCleanOrigin());
2317 insertShadowCheck(
I.getOperand(1), &
I);
2321 setOrigin(&
I, getOrigin(&
I, 0));
2325 insertShadowCheck(
I.getOperand(2), &
I);
2327 auto *Shadow0 = getShadow(&
I, 0);
2328 auto *Shadow1 = getShadow(&
I, 1);
2331 setOriginForNaryOp(
I);
2336 auto *Shadow0 = getShadow(&
I, 0);
2337 auto *Shadow1 = getShadow(&
I, 1);
2340 setOriginForNaryOp(
I);
2346 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2347 setOrigin(&
I, getOrigin(&
I, 0));
2352 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2353 setOrigin(&
I, getOrigin(&
I, 0));
2358 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2359 setOrigin(&
I, getOrigin(&
I, 0));
2366 if (
auto *CI = dyn_cast<CallInst>(
I.getOperand(0)))
2367 if (CI->isMustTailCall())
2371 setOrigin(&
I, getOrigin(&
I, 0));
2377 "_msprop_ptrtoint"));
2378 setOrigin(&
I, getOrigin(&
I, 0));
2384 "_msprop_inttoptr"));
2385 setOrigin(&
I, getOrigin(&
I, 0));
2388 void visitFPToSIInst(
CastInst &
I) { handleShadowOr(
I); }
2389 void visitFPToUIInst(
CastInst &
I) { handleShadowOr(
I); }
2390 void visitSIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2391 void visitUIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2392 void visitFPExtInst(
CastInst &
I) { handleShadowOr(
I); }
2393 void visitFPTruncInst(
CastInst &
I) { handleShadowOr(
I); }
2408 Value *S2 = getShadow(&
I, 1);
2409 Value *V1 =
I.getOperand(0);
2418 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2419 setOriginForNaryOp(
I);
2430 Value *S2 = getShadow(&
I, 1);
2440 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2441 setOriginForNaryOp(
I);
2459 template <
bool CombineShadow>
class Combiner {
2460 Value *Shadow =
nullptr;
2461 Value *Origin =
nullptr;
2463 MemorySanitizerVisitor *MSV;
2467 : IRB(IRB), MSV(MSV) {}
2471 if (CombineShadow) {
2476 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2477 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2481 if (MSV->MS.TrackOrigins) {
2486 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2488 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2489 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2499 Value *OpShadow = MSV->getShadow(V);
2500 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2501 return Add(OpShadow, OpOrigin);
2507 if (CombineShadow) {
2509 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2510 MSV->setShadow(
I, Shadow);
2512 if (MSV->MS.TrackOrigins) {
2514 MSV->setOrigin(
I, Origin);
2521 if (MSV->MS.TrackOrigins) {
2533 if (!MS.TrackOrigins)
2536 OriginCombiner
OC(
this, IRB);
2537 for (
Use &
Op :
I.operands())
2542 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2544 "Vector of pointers is not a valid shadow type");
2545 return Ty->
isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2554 Type *srcTy =
V->getType();
2557 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2558 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2559 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2565 cast<VectorType>(dstTy)->getElementCount() ==
2566 cast<VectorType>(srcTy)->getElementCount())
2577 Type *ShadowTy = getShadowTy(V);
2578 if (
V->getType() == ShadowTy)
2580 if (
V->getType()->isPtrOrPtrVectorTy())
2589 ShadowAndOriginCombiner
SC(
this, IRB);
2590 for (
Use &
Op :
I.operands())
2610 if (
auto *VTy = dyn_cast<VectorType>(Ty)) {
2611 unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2612 Type *EltTy = VTy->getElementType();
2614 for (
unsigned Idx = 0;
Idx < NumElements; ++
Idx) {
2617 const APInt &
V = Elt->getValue();
2619 Elements.push_back(ConstantInt::get(EltTy, V2));
2621 Elements.push_back(ConstantInt::get(EltTy, 1));
2626 if (
ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2627 const APInt &
V = Elt->getValue();
2629 ShadowMul = ConstantInt::get(Ty, V2);
2631 ShadowMul = ConstantInt::get(Ty, 1);
2637 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2638 setOrigin(&
I, getOrigin(OtherArg));
2642 Constant *constOp0 = dyn_cast<Constant>(
I.getOperand(0));
2643 Constant *constOp1 = dyn_cast<Constant>(
I.getOperand(1));
2644 if (constOp0 && !constOp1)
2645 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2646 else if (constOp1 && !constOp0)
2647 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2662 insertShadowCheck(
I.getOperand(1), &
I);
2663 setShadow(&
I, getShadow(&
I, 0));
2664 setOrigin(&
I, getOrigin(&
I, 0));
2681 void handleEqualityComparison(
ICmpInst &
I) {
2685 Value *Sa = getShadow(
A);
2686 Value *Sb = getShadow(
B);
2712 setOriginForNaryOp(
I);
2720 void handleRelationalComparisonExact(
ICmpInst &
I) {
2724 Value *Sa = getShadow(
A);
2725 Value *Sb = getShadow(
B);
2736 bool IsSigned =
I.isSigned();
2738 auto GetMinMaxUnsigned = [&](
Value *
V,
Value *S) {
2748 V = IRB.
CreateXor(V, ConstantInt::get(
V->getType(), MinVal));
2753 return std::make_pair(Min, Max);
2756 auto [Amin, Amax] = GetMinMaxUnsigned(
A, Sa);
2757 auto [Bmin, Bmax] = GetMinMaxUnsigned(
B, Sb);
2763 setOriginForNaryOp(
I);
2770 void handleSignedRelationalComparison(
ICmpInst &
I) {
2774 if ((constOp = dyn_cast<Constant>(
I.getOperand(1)))) {
2775 op =
I.getOperand(0);
2776 pre =
I.getPredicate();
2777 }
else if ((constOp = dyn_cast<Constant>(
I.getOperand(0)))) {
2778 op =
I.getOperand(1);
2779 pre =
I.getSwappedPredicate();
2792 setShadow(&
I, Shadow);
2793 setOrigin(&
I, getOrigin(
op));
2804 if (
I.isEquality()) {
2805 handleEqualityComparison(
I);
2811 handleRelationalComparisonExact(
I);
2815 handleSignedRelationalComparison(
I);
2820 if ((isa<Constant>(
I.getOperand(0)) || isa<Constant>(
I.getOperand(1)))) {
2821 handleRelationalComparisonExact(
I);
2828 void visitFCmpInst(
FCmpInst &
I) { handleShadowOr(
I); }
2835 Value *S2 = getShadow(&
I, 1);
2840 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2841 setOriginForNaryOp(
I);
2852 Value *S0 = getShadow(&
I, 0);
2854 Value *S2 = getShadow(&
I, 2);
2860 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2861 setOriginForNaryOp(
I);
2875 getShadow(
I.getArgOperand(1));
2878 {I.getArgOperand(0), I.getArgOperand(1),
2879 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2880 I.eraseFromParent();
2898 getShadow(
I.getArgOperand(1));
2901 {I.getArgOperand(0), I.getArgOperand(1),
2902 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2903 I.eraseFromParent();
2911 {I.getArgOperand(0),
2912 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2913 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2914 I.eraseFromParent();
2917 void visitVAStartInst(
VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
2919 void visitVACopyInst(
VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
2928 Value *Shadow = getShadow(&
I, 1);
2929 Value *ShadowPtr, *OriginPtr;
2933 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2938 insertShadowCheck(
Addr, &
I);
2941 if (MS.TrackOrigins)
2954 Type *ShadowTy = getShadowTy(&
I);
2955 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2956 if (PropagateShadow) {
2960 std::tie(ShadowPtr, OriginPtr) =
2961 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2965 setShadow(&
I, getCleanShadow(&
I));
2969 insertShadowCheck(
Addr, &
I);
2971 if (MS.TrackOrigins) {
2972 if (PropagateShadow)
2973 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
2975 setOrigin(&
I, getCleanOrigin());
2988 if (!(
RetTy->isIntOrIntVectorTy() ||
RetTy->isFPOrFPVectorTy()))
2991 unsigned NumArgOperands =
I.arg_size();
2992 for (
unsigned i = 0; i < NumArgOperands; ++i) {
2993 Type *Ty =
I.getArgOperand(i)->getType();
2999 ShadowAndOriginCombiner
SC(
this, IRB);
3000 for (
unsigned i = 0; i < NumArgOperands; ++i)
3001 SC.Add(
I.getArgOperand(i));
3018 unsigned NumArgOperands =
I.arg_size();
3019 if (NumArgOperands == 0)
3022 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3023 I.getArgOperand(1)->getType()->isVectorTy() &&
3024 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
3026 return handleVectorStoreIntrinsic(
I);
3029 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3030 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3032 return handleVectorLoadIntrinsic(
I);
3035 if (
I.doesNotAccessMemory())
3036 if (maybeHandleSimpleNomemIntrinsic(
I))
3044 setShadow(&
I, getShadow(&
I, 0));
3045 setOrigin(&
I, getOrigin(&
I, 0));
3053 InstrumentLifetimeStart =
false;
3054 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3060 Type *OpType =
Op->getType();
3063 setOrigin(&
I, getOrigin(
Op));
3068 Value *Src =
I.getArgOperand(0);
3074 Constant *IsZeroPoison = cast<Constant>(
I.getOperand(1));
3077 BoolShadow = IRB.
CreateOr(BoolShadow, BoolZeroPoison,
"_mscz_bs");
3080 Value *OutputShadow =
3081 IRB.
CreateSExt(BoolShadow, getShadowTy(Src),
"_mscz_os");
3083 setShadow(&
I, OutputShadow);
3084 setOriginForNaryOp(
I);
3102 void handleVectorConvertIntrinsic(
IntrinsicInst &
I,
int NumUsedElements,
3103 bool HasRoundingMode =
false) {
3105 Value *CopyOp, *ConvertOp;
3107 assert((!HasRoundingMode ||
3108 isa<ConstantInt>(
I.getArgOperand(
I.arg_size() - 1))) &&
3109 "Invalid rounding mode");
3111 switch (
I.arg_size() - HasRoundingMode) {
3113 CopyOp =
I.getArgOperand(0);
3114 ConvertOp =
I.getArgOperand(1);
3117 ConvertOp =
I.getArgOperand(0);
3131 Value *ConvertShadow = getShadow(ConvertOp);
3132 Value *AggShadow =
nullptr;
3135 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3136 for (
int i = 1; i < NumUsedElements; ++i) {
3138 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3139 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3142 AggShadow = ConvertShadow;
3145 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &
I);
3152 Value *ResultShadow = getShadow(CopyOp);
3153 Type *EltTy = cast<VectorType>(ResultShadow->
getType())->getElementType();
3154 for (
int i = 0; i < NumUsedElements; ++i) {
3156 ResultShadow, ConstantInt::getNullValue(EltTy),
3159 setShadow(&
I, ResultShadow);
3160 setOrigin(&
I, getOrigin(CopyOp));
3162 setShadow(&
I, getCleanShadow(&
I));
3163 setOrigin(&
I, getCleanOrigin());
3171 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3174 return CreateShadowCast(IRB, S2,
T,
true);
3182 return CreateShadowCast(IRB, S2,
T,
true);
3199 void handleVectorShiftIntrinsic(
IntrinsicInst &
I,
bool Variable) {
3205 Value *S2 = getShadow(&
I, 1);
3206 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3207 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3208 Value *V1 =
I.getOperand(0);
3211 {IRB.CreateBitCast(S1, V1->getType()), V2});
3213 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3214 setOriginForNaryOp(
I);
3218 Type *getMMXVectorTy(
unsigned EltSizeInBits) {
3219 const unsigned X86_MMXSizeInBits = 64;
3220 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3221 "Illegal MMX vector element size");
3223 X86_MMXSizeInBits / EltSizeInBits);
3230 case Intrinsic::x86_sse2_packsswb_128:
3231 case Intrinsic::x86_sse2_packuswb_128:
3232 return Intrinsic::x86_sse2_packsswb_128;
3234 case Intrinsic::x86_sse2_packssdw_128:
3235 case Intrinsic::x86_sse41_packusdw:
3236 return Intrinsic::x86_sse2_packssdw_128;
3238 case Intrinsic::x86_avx2_packsswb:
3239 case Intrinsic::x86_avx2_packuswb:
3240 return Intrinsic::x86_avx2_packsswb;
3242 case Intrinsic::x86_avx2_packssdw:
3243 case Intrinsic::x86_avx2_packusdw:
3244 return Intrinsic::x86_avx2_packssdw;
3246 case Intrinsic::x86_mmx_packsswb:
3247 case Intrinsic::x86_mmx_packuswb:
3248 return Intrinsic::x86_mmx_packsswb;
3250 case Intrinsic::x86_mmx_packssdw:
3251 return Intrinsic::x86_mmx_packssdw;
3265 unsigned MMXEltSizeInBits = 0) {
3269 Value *S2 = getShadow(&
I, 1);
3270 assert(
S1->getType()->isVectorTy());
3276 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) :
S1->
getType();
3277 if (MMXEltSizeInBits) {
3285 if (MMXEltSizeInBits) {
3291 {}, {S1_ext, S2_ext},
nullptr,
3292 "_msprop_vector_pack");
3293 if (MMXEltSizeInBits)
3296 setOriginForNaryOp(
I);
3300 Constant *createDppMask(
unsigned Width,
unsigned Mask) {
3313 const unsigned Width =
3314 cast<FixedVectorType>(S->
getType())->getNumElements();
3320 Value *DstMaskV = createDppMask(Width, DstMask);
3340 Value *S0 = getShadow(&
I, 0);
3344 const unsigned Width =
3345 cast<FixedVectorType>(S->
getType())->getNumElements();
3346 assert(Width == 2 || Width == 4 || Width == 8);
3348 const unsigned Mask = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3349 const unsigned SrcMask =
Mask >> 4;
3350 const unsigned DstMask =
Mask & 0xf;
3353 Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3358 SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3365 setOriginForNaryOp(
I);
3369 C = CreateAppToShadowCast(IRB,
C);
3383 Value *Sc = getShadow(&
I, 2);
3384 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
3389 C = convertBlendvToSelectMask(IRB,
C);
3390 Sc = convertBlendvToSelectMask(IRB, Sc);
3396 handleSelectLikeInst(
I,
C,
T,
F);
3400 void handleVectorSadIntrinsic(
IntrinsicInst &
I,
bool IsMMX =
false) {
3401 const unsigned SignificantBitsPerResultElement = 16;
3403 unsigned ZeroBitsPerResultElement =
3407 auto *Shadow0 = getShadow(&
I, 0);
3408 auto *Shadow1 = getShadow(&
I, 1);
3413 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3416 setOriginForNaryOp(
I);
3421 unsigned MMXEltSizeInBits = 0) {
3423 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits * 2) :
I.
getType();
3425 auto *Shadow0 = getShadow(&
I, 0);
3426 auto *Shadow1 = getShadow(&
I, 1);
3433 setOriginForNaryOp(
I);
3441 Type *ResTy = getShadowTy(&
I);
3442 auto *Shadow0 = getShadow(&
I, 0);
3443 auto *Shadow1 = getShadow(&
I, 1);
3448 setOriginForNaryOp(
I);
3456 auto *Shadow0 = getShadow(&
I, 0);
3457 auto *Shadow1 = getShadow(&
I, 1);
3459 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
3461 setOriginForNaryOp(
I);
3470 setOrigin(&
I, getOrigin(&
I, 0));
3478 Value *OperandShadow = getShadow(&
I, 0);
3480 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
3488 setOrigin(&
I, getOrigin(&
I, 0));
3496 Value *OperandShadow = getShadow(&
I, 0);
3497 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
3505 setOrigin(&
I, getOrigin(&
I, 0));
3513 getShadowOriginPtr(
Addr, IRB, Ty,
Align(1),
true).first;
3518 insertShadowCheck(
Addr, &
I);
3529 Value *ShadowPtr, *OriginPtr;
3530 std::tie(ShadowPtr, OriginPtr) =
3531 getShadowOriginPtr(
Addr, IRB, Ty, Alignment,
false);
3534 insertShadowCheck(
Addr, &
I);
3537 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
3539 insertShadowCheck(Shadow, Origin, &
I);
3547 Value *PassThru =
I.getArgOperand(2);
3550 insertShadowCheck(
Ptr, &
I);
3551 insertShadowCheck(Mask, &
I);
3554 if (!PropagateShadow) {
3555 setShadow(&
I, getCleanShadow(&
I));
3556 setOrigin(&
I, getCleanOrigin());
3560 Type *ShadowTy = getShadowTy(&
I);
3561 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3562 auto [ShadowPtr, OriginPtr] =
3563 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy,
Align,
false);
3567 getShadow(PassThru),
"_msmaskedexpload");
3569 setShadow(&
I, Shadow);
3572 setOrigin(&
I, getCleanOrigin());
3577 Value *Values =
I.getArgOperand(0);
3583 insertShadowCheck(
Ptr, &
I);
3584 insertShadowCheck(Mask, &
I);
3587 Value *Shadow = getShadow(Values);
3588 Type *ElementShadowTy =
3589 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3590 auto [ShadowPtr, OriginPtrs] =
3591 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy,
Align,
true);
3600 Value *Ptrs =
I.getArgOperand(0);
3601 const Align Alignment(
3602 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3604 Value *PassThru =
I.getArgOperand(3);
3606 Type *PtrsShadowTy = getShadowTy(Ptrs);
3608 insertShadowCheck(Mask, &
I);
3612 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3615 if (!PropagateShadow) {
3616 setShadow(&
I, getCleanShadow(&
I));
3617 setOrigin(&
I, getCleanOrigin());
3621 Type *ShadowTy = getShadowTy(&
I);
3622 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3623 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3624 Ptrs, IRB, ElementShadowTy, Alignment,
false);
3628 getShadow(PassThru),
"_msmaskedgather");
3630 setShadow(&
I, Shadow);
3633 setOrigin(&
I, getCleanOrigin());
3638 Value *Values =
I.getArgOperand(0);
3639 Value *Ptrs =
I.getArgOperand(1);
3640 const Align Alignment(
3641 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3644 Type *PtrsShadowTy = getShadowTy(Ptrs);
3646 insertShadowCheck(Mask, &
I);
3650 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3653 Value *Shadow = getShadow(Values);
3654 Type *ElementShadowTy =
3655 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3656 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3657 Ptrs, IRB, ElementShadowTy, Alignment,
true);
3666 Value *
V =
I.getArgOperand(0);
3668 const Align Alignment(
3669 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3671 Value *Shadow = getShadow(V);
3674 insertShadowCheck(
Ptr, &
I);
3675 insertShadowCheck(Mask, &
I);
3680 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3681 Ptr, IRB, Shadow->
getType(), Alignment,
true);
3685 if (!MS.TrackOrigins)
3688 auto &
DL =
F.getDataLayout();
3689 paintOrigin(IRB, getOrigin(V), OriginPtr,
3697 const Align Alignment(
3698 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3700 Value *PassThru =
I.getArgOperand(3);
3703 insertShadowCheck(
Ptr, &
I);
3704 insertShadowCheck(Mask, &
I);
3707 if (!PropagateShadow) {
3708 setShadow(&
I, getCleanShadow(&
I));
3709 setOrigin(&
I, getCleanOrigin());
3713 Type *ShadowTy = getShadowTy(&
I);
3714 Value *ShadowPtr, *OriginPtr;
3715 std::tie(ShadowPtr, OriginPtr) =
3716 getShadowOriginPtr(
Ptr, IRB, ShadowTy, Alignment,
false);
3718 getShadow(PassThru),
"_msmaskedld"));
3720 if (!MS.TrackOrigins)
3727 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
3732 setOrigin(&
I, Origin);
3742 Type *ShadowTy = getShadowTy(&
I);
3745 Value *SMask = getShadow(&
I, 1);
3750 {getShadow(&I, 0), I.getOperand(1)});
3753 setOriginForNaryOp(
I);
3758 for (
unsigned X = OddElements ? 1 : 0;
X < Width;
X += 2) {
3775 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3776 assert(isa<ConstantInt>(
I.getArgOperand(2)) &&
3777 "pclmul 3rd operand must be a constant");
3778 unsigned Imm = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3780 getPclmulMask(Width, Imm & 0x01));
3782 getPclmulMask(Width, Imm & 0x10));
3783 ShadowAndOriginCombiner SOC(
this, IRB);
3784 SOC.Add(Shuf0, getOrigin(&
I, 0));
3785 SOC.Add(Shuf1, getOrigin(&
I, 1));
3793 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3795 Value *Second = getShadow(&
I, 1);
3798 Mask.push_back(Width);
3799 for (
unsigned i = 1; i < Width; i++)
3803 setShadow(&
I, Shadow);
3804 setOriginForNaryOp(
I);
3809 Value *Shadow0 = getShadow(&
I, 0);
3810 Value *Shadow1 = getShadow(&
I, 1);
3816 setShadow(&
I, Shadow);
3817 setOriginForNaryOp(
I);
3823 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3825 Value *Second = getShadow(&
I, 1);
3829 Mask.push_back(Width);
3830 for (
unsigned i = 1; i < Width; i++)
3834 setShadow(&
I, Shadow);
3835 setOriginForNaryOp(
I);
3842 assert(
I.getArgOperand(0)->getType() ==
I.getType());
3844 assert(isa<ConstantInt>(
I.getArgOperand(1)));
3847 ShadowAndOriginCombiner
SC(
this, IRB);
3848 SC.Add(
I.getArgOperand(0));
3856 assert(
I.getType()->isIntOrIntVectorTy());
3857 assert(
I.getArgOperand(0)->getType() ==
I.getType());
3861 setShadow(&
I, getShadow(&
I, 0));
3862 setOrigin(&
I, getOrigin(&
I, 0));
3867 Value *Shadow = getShadow(&
I, 0);
3868 setShadow(&
I, IRB.
CreateICmpNE(Shadow, getCleanShadow(Shadow)));
3869 setOrigin(&
I, getOrigin(&
I, 0));
3874 Value *Shadow0 = getShadow(&
I, 0);
3875 Value *Shadow1 = getShadow(&
I, 1);
3878 IRB.
CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
3884 setShadow(&
I, Shadow);
3885 setOriginForNaryOp(
I);
3902 void handleNEONVectorStoreIntrinsic(
IntrinsicInst &
I,
bool useLane) {
3906 int numArgOperands =
I.arg_size();
3909 assert(numArgOperands >= 1);
3910 Value *
Addr =
I.getArgOperand(numArgOperands - 1);
3912 int skipTrailingOperands = 1;
3915 insertShadowCheck(
Addr, &
I);
3919 skipTrailingOperands++;
3920 assert(numArgOperands >=
static_cast<int>(skipTrailingOperands));
3922 I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
3927 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
3928 assert(isa<FixedVectorType>(
I.getArgOperand(i)->getType()));
3929 Value *Shadow = getShadow(&
I, i);
3930 ShadowArgs.
append(1, Shadow);
3945 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getElementType(),
3946 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements() *
3947 (numArgOperands - skipTrailingOperands));
3948 Type *OutputShadowTy = getShadowTy(OutputVectorTy);
3952 I.getArgOperand(numArgOperands - skipTrailingOperands));
3954 Value *OutputShadowPtr, *OutputOriginPtr;
3956 std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
3957 Addr, IRB, OutputShadowTy,
Align(1),
true);
3958 ShadowArgs.
append(1, OutputShadowPtr);
3964 if (MS.TrackOrigins) {
3972 OriginCombiner
OC(
this, IRB);
3973 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++)
3974 OC.Add(
I.getArgOperand(i));
3977 OC.DoneAndStoreOrigin(
DL.getTypeStoreSize(OutputVectorTy),
3997 unsigned int trailingVerbatimArgs) {
4000 assert(trailingVerbatimArgs <
I.arg_size());
4004 for (
unsigned int i = 0; i <
I.arg_size() - trailingVerbatimArgs; i++) {
4005 Value *Shadow = getShadow(&
I, i);
4009 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
4011 Value *Arg =
I.getArgOperand(i);
4017 Value *CombinedShadow = CI;
4020 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
4023 CreateShadowCast(IRB, getShadow(&
I, i), CombinedShadow->
getType());
4024 CombinedShadow = IRB.
CreateOr(Shadow, CombinedShadow,
"_msprop");
4027 setShadow(&
I, CombinedShadow);
4029 setOriginForNaryOp(
I);
4038 switch (
I.getIntrinsicID()) {
4039 case Intrinsic::uadd_with_overflow:
4040 case Intrinsic::sadd_with_overflow:
4041 case Intrinsic::usub_with_overflow:
4042 case Intrinsic::ssub_with_overflow:
4043 case Intrinsic::umul_with_overflow:
4044 case Intrinsic::smul_with_overflow:
4045 handleArithmeticWithOverflow(
I);
4047 case Intrinsic::abs:
4048 handleAbsIntrinsic(
I);
4050 case Intrinsic::is_fpclass:
4053 case Intrinsic::lifetime_start:
4054 handleLifetimeStart(
I);
4056 case Intrinsic::launder_invariant_group:
4057 case Intrinsic::strip_invariant_group:
4058 handleInvariantGroup(
I);
4060 case Intrinsic::bswap:
4063 case Intrinsic::ctlz:
4064 case Intrinsic::cttz:
4065 handleCountZeroes(
I);
4067 case Intrinsic::masked_compressstore:
4068 handleMaskedCompressStore(
I);
4070 case Intrinsic::masked_expandload:
4071 handleMaskedExpandLoad(
I);
4073 case Intrinsic::masked_gather:
4074 handleMaskedGather(
I);
4076 case Intrinsic::masked_scatter:
4077 handleMaskedScatter(
I);
4079 case Intrinsic::masked_store:
4080 handleMaskedStore(
I);
4082 case Intrinsic::masked_load:
4083 handleMaskedLoad(
I);
4085 case Intrinsic::vector_reduce_and:
4086 handleVectorReduceAndIntrinsic(
I);
4088 case Intrinsic::vector_reduce_or:
4089 handleVectorReduceOrIntrinsic(
I);
4091 case Intrinsic::vector_reduce_add:
4092 case Intrinsic::vector_reduce_xor:
4093 case Intrinsic::vector_reduce_mul:
4094 handleVectorReduceIntrinsic(
I);
4096 case Intrinsic::x86_sse_stmxcsr:
4099 case Intrinsic::x86_sse_ldmxcsr:
4102 case Intrinsic::x86_avx512_vcvtsd2usi64:
4103 case Intrinsic::x86_avx512_vcvtsd2usi32:
4104 case Intrinsic::x86_avx512_vcvtss2usi64:
4105 case Intrinsic::x86_avx512_vcvtss2usi32:
4106 case Intrinsic::x86_avx512_cvttss2usi64:
4107 case Intrinsic::x86_avx512_cvttss2usi:
4108 case Intrinsic::x86_avx512_cvttsd2usi64:
4109 case Intrinsic::x86_avx512_cvttsd2usi:
4110 case Intrinsic::x86_avx512_cvtusi2ss:
4111 case Intrinsic::x86_avx512_cvtusi642sd:
4112 case Intrinsic::x86_avx512_cvtusi642ss:
4113 handleVectorConvertIntrinsic(
I, 1,
true);
4115 case Intrinsic::x86_sse2_cvtsd2si64:
4116 case Intrinsic::x86_sse2_cvtsd2si:
4117 case Intrinsic::x86_sse2_cvtsd2ss:
4118 case Intrinsic::x86_sse2_cvttsd2si64:
4119 case Intrinsic::x86_sse2_cvttsd2si:
4120 case Intrinsic::x86_sse_cvtss2si64:
4121 case Intrinsic::x86_sse_cvtss2si:
4122 case Intrinsic::x86_sse_cvttss2si64:
4123 case Intrinsic::x86_sse_cvttss2si:
4124 handleVectorConvertIntrinsic(
I, 1);
4126 case Intrinsic::x86_sse_cvtps2pi:
4127 case Intrinsic::x86_sse_cvttps2pi:
4128 handleVectorConvertIntrinsic(
I, 2);
4131 case Intrinsic::x86_avx512_psll_w_512:
4132 case Intrinsic::x86_avx512_psll_d_512:
4133 case Intrinsic::x86_avx512_psll_q_512:
4134 case Intrinsic::x86_avx512_pslli_w_512:
4135 case Intrinsic::x86_avx512_pslli_d_512:
4136 case Intrinsic::x86_avx512_pslli_q_512:
4137 case Intrinsic::x86_avx512_psrl_w_512:
4138 case Intrinsic::x86_avx512_psrl_d_512:
4139 case Intrinsic::x86_avx512_psrl_q_512:
4140 case Intrinsic::x86_avx512_psra_w_512:
4141 case Intrinsic::x86_avx512_psra_d_512:
4142 case Intrinsic::x86_avx512_psra_q_512:
4143 case Intrinsic::x86_avx512_psrli_w_512:
4144 case Intrinsic::x86_avx512_psrli_d_512:
4145 case Intrinsic::x86_avx512_psrli_q_512:
4146 case Intrinsic::x86_avx512_psrai_w_512:
4147 case Intrinsic::x86_avx512_psrai_d_512:
4148 case Intrinsic::x86_avx512_psrai_q_512:
4149 case Intrinsic::x86_avx512_psra_q_256:
4150 case Intrinsic::x86_avx512_psra_q_128:
4151 case Intrinsic::x86_avx512_psrai_q_256:
4152 case Intrinsic::x86_avx512_psrai_q_128:
4153 case Intrinsic::x86_avx2_psll_w:
4154 case Intrinsic::x86_avx2_psll_d:
4155 case Intrinsic::x86_avx2_psll_q:
4156 case Intrinsic::x86_avx2_pslli_w:
4157 case Intrinsic::x86_avx2_pslli_d:
4158 case Intrinsic::x86_avx2_pslli_q:
4159 case Intrinsic::x86_avx2_psrl_w:
4160 case Intrinsic::x86_avx2_psrl_d:
4161 case Intrinsic::x86_avx2_psrl_q:
4162 case Intrinsic::x86_avx2_psra_w:
4163 case Intrinsic::x86_avx2_psra_d:
4164 case Intrinsic::x86_avx2_psrli_w:
4165 case Intrinsic::x86_avx2_psrli_d:
4166 case Intrinsic::x86_avx2_psrli_q:
4167 case Intrinsic::x86_avx2_psrai_w:
4168 case Intrinsic::x86_avx2_psrai_d:
4169 case Intrinsic::x86_sse2_psll_w:
4170 case Intrinsic::x86_sse2_psll_d:
4171 case Intrinsic::x86_sse2_psll_q:
4172 case Intrinsic::x86_sse2_pslli_w:
4173 case Intrinsic::x86_sse2_pslli_d:
4174 case Intrinsic::x86_sse2_pslli_q:
4175 case Intrinsic::x86_sse2_psrl_w:
4176 case Intrinsic::x86_sse2_psrl_d:
4177 case Intrinsic::x86_sse2_psrl_q:
4178 case Intrinsic::x86_sse2_psra_w:
4179 case Intrinsic::x86_sse2_psra_d:
4180 case Intrinsic::x86_sse2_psrli_w:
4181 case Intrinsic::x86_sse2_psrli_d:
4182 case Intrinsic::x86_sse2_psrli_q:
4183 case Intrinsic::x86_sse2_psrai_w:
4184 case Intrinsic::x86_sse2_psrai_d:
4185 case Intrinsic::x86_mmx_psll_w:
4186 case Intrinsic::x86_mmx_psll_d:
4187 case Intrinsic::x86_mmx_psll_q:
4188 case Intrinsic::x86_mmx_pslli_w:
4189 case Intrinsic::x86_mmx_pslli_d:
4190 case Intrinsic::x86_mmx_pslli_q:
4191 case Intrinsic::x86_mmx_psrl_w:
4192 case Intrinsic::x86_mmx_psrl_d:
4193 case Intrinsic::x86_mmx_psrl_q:
4194 case Intrinsic::x86_mmx_psra_w:
4195 case Intrinsic::x86_mmx_psra_d:
4196 case Intrinsic::x86_mmx_psrli_w:
4197 case Intrinsic::x86_mmx_psrli_d:
4198 case Intrinsic::x86_mmx_psrli_q:
4199 case Intrinsic::x86_mmx_psrai_w:
4200 case Intrinsic::x86_mmx_psrai_d:
4201 case Intrinsic::aarch64_neon_rshrn:
4202 case Intrinsic::aarch64_neon_sqrshl:
4203 case Intrinsic::aarch64_neon_sqrshrn:
4204 case Intrinsic::aarch64_neon_sqrshrun:
4205 case Intrinsic::aarch64_neon_sqshl:
4206 case Intrinsic::aarch64_neon_sqshlu:
4207 case Intrinsic::aarch64_neon_sqshrn:
4208 case Intrinsic::aarch64_neon_sqshrun:
4209 case Intrinsic::aarch64_neon_srshl:
4210 case Intrinsic::aarch64_neon_sshl:
4211 case Intrinsic::aarch64_neon_uqrshl:
4212 case Intrinsic::aarch64_neon_uqrshrn:
4213 case Intrinsic::aarch64_neon_uqshl:
4214 case Intrinsic::aarch64_neon_uqshrn:
4215 case Intrinsic::aarch64_neon_urshl:
4216 case Intrinsic::aarch64_neon_ushl:
4218 handleVectorShiftIntrinsic(
I,
false);
4220 case Intrinsic::x86_avx2_psllv_d:
4221 case Intrinsic::x86_avx2_psllv_d_256:
4222 case Intrinsic::x86_avx512_psllv_d_512:
4223 case Intrinsic::x86_avx2_psllv_q:
4224 case Intrinsic::x86_avx2_psllv_q_256:
4225 case Intrinsic::x86_avx512_psllv_q_512:
4226 case Intrinsic::x86_avx2_psrlv_d:
4227 case Intrinsic::x86_avx2_psrlv_d_256:
4228 case Intrinsic::x86_avx512_psrlv_d_512:
4229 case Intrinsic::x86_avx2_psrlv_q:
4230 case Intrinsic::x86_avx2_psrlv_q_256:
4231 case Intrinsic::x86_avx512_psrlv_q_512:
4232 case Intrinsic::x86_avx2_psrav_d:
4233 case Intrinsic::x86_avx2_psrav_d_256:
4234 case Intrinsic::x86_avx512_psrav_d_512:
4235 case Intrinsic::x86_avx512_psrav_q_128:
4236 case Intrinsic::x86_avx512_psrav_q_256:
4237 case Intrinsic::x86_avx512_psrav_q_512:
4238 handleVectorShiftIntrinsic(
I,
true);
4241 case Intrinsic::x86_sse2_packsswb_128:
4242 case Intrinsic::x86_sse2_packssdw_128:
4243 case Intrinsic::x86_sse2_packuswb_128:
4244 case Intrinsic::x86_sse41_packusdw:
4245 case Intrinsic::x86_avx2_packsswb:
4246 case Intrinsic::x86_avx2_packssdw:
4247 case Intrinsic::x86_avx2_packuswb:
4248 case Intrinsic::x86_avx2_packusdw:
4249 handleVectorPackIntrinsic(
I);
4252 case Intrinsic::x86_sse41_pblendvb:
4253 case Intrinsic::x86_sse41_blendvpd:
4254 case Intrinsic::x86_sse41_blendvps:
4255 case Intrinsic::x86_avx_blendv_pd_256:
4256 case Intrinsic::x86_avx_blendv_ps_256:
4257 case Intrinsic::x86_avx2_pblendvb:
4258 handleBlendvIntrinsic(
I);
4261 case Intrinsic::x86_avx_dp_ps_256:
4262 case Intrinsic::x86_sse41_dppd:
4263 case Intrinsic::x86_sse41_dpps:
4264 handleDppIntrinsic(
I);
4267 case Intrinsic::x86_mmx_packsswb:
4268 case Intrinsic::x86_mmx_packuswb:
4269 handleVectorPackIntrinsic(
I, 16);
4272 case Intrinsic::x86_mmx_packssdw:
4273 handleVectorPackIntrinsic(
I, 32);
4276 case Intrinsic::x86_mmx_psad_bw:
4277 handleVectorSadIntrinsic(
I,
true);
4279 case Intrinsic::x86_sse2_psad_bw:
4280 case Intrinsic::x86_avx2_psad_bw:
4281 handleVectorSadIntrinsic(
I);
    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I);
      break;

    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 8);
      break;

    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 16);
      break;
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;
    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;
    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;
    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;
    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;

    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, /*useLane*/ false);
      break;
    }

    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, /*useLane*/ true);
      break;
    }

    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      // The last trailing argument (index register) should be handled verbatim.
      handleIntrinsicByApplyingToShadow(I, 1);
      break;
    }

    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }
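  // Calls into libatomic (e.g. __atomic_load / __atomic_store) are plain
  // library calls that cannot be intercepted reliably, so the shadow of the
  // affected memory has to be copied or cleared explicitly around them.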
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have a terminator.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure
    // the shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }

  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure
    // the shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    // Atomic store always paints clean shadow/origin. See file header.
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }

  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean. Note that any side effects of the inline asm
      // that are not immediately visible in its constraints are not handled.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need special handling because there isn't a
      // good way to intercept them or compile the library with
      // instrumentation.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us.
      // To prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);

      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction())
        Func->removeFnAttrs(B);
    }

    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so don't check them.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }
    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertShadowCheck(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();

      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertShadowCheck(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        Value *Store = nullptr;
        // Compute the Shadow for arg even if it is ByVal, because if there is
        // a ByVal argument it will not be ignored on CB, e.g. for x86_64.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a single
          // load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment, Size);
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of uninit
          // data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
      }
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }
    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!CB.getType()->isSized())
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter), kShadowTLSAlignment,
        "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB,
                IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertShadowCheck(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
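  // Check whether the return value feeds a musttail call, possibly through a
  // bitcast; no epilogue may be emitted after such a return.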
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    // Defer the actual shadow/origin arguments until all predecessors have
    // been visited.
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn,
                       {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the
    // corresponding llvm.lifetime.start.
    AllocaSet.insert(&I);
  }
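  // select: if the condition is well-defined, the result shadow is that of
  // the chosen operand; if the condition shadow is set, the result is
  // poisoned except for bits on which both operands agree and are clean.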
  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select" with a poisoned shadow; this results in much
      // more compact IR.
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If the condition is poisoned, look for bits in c and d that are equal
      // and both unpoisoned. Cast arguments to shadow-compatible type first.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
    }
    setShadow(&I, IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"));
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing. See https://github.com/google/sanitizers/issues/504
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // Check the operand value itself.
    insertShadowCheck(Operand, &I);
    if (!ElemTy || !Operand->getType()->isPointerTy() || !isOutput)
      return;
    if (!ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy, derived from the elementtype attribute, does not necessarily
      // match the actual pointee type.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                       SizeVal, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }
  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments.
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    const DataLayout &DL = F.getDataLayout();
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Doing so before unpoisoning output arguments, so
    // that we won't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual InlineAsm
    // call, so that the shadow for memory published in the asm() statement
    // remains valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertShadowCheck(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
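// Varargs instrumentation: at each call site the argument shadow is spilled
// into the __msan_va_arg_tls buffer; the per-ABI helpers below copy it into
// the shadow of the register save area / overflow area when va_start runs.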
struct VarArgHelperBase : public VarArgHelper {
protected:
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = getShadowAddrForVAArgument(IRB, ArgOffset);
    return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
  }

  /// Compute the shadow address for a given va_arg, checking that it does
  /// not overflow __msan_va_arg_tls.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
  }

  /// The tail of __msan_va_arg_tls is not large enough to fit full value
  /// shadow, but it will be copied to the backup anyway. Make it clean.
  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, Constant::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
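/// AMD64-specific implementation of VarArgHelper.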
struct VarArgAMD64Helper : public VarArgHelperBase {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
  // The instrumentation stores the argument shadow in a non-ABI-specific
  // format because it does not know which argument is named: Clang lowers
  // va_arg in the frontend, and this pass only sees the low-level code that
  // deals with va_list internals.
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area. Fixed arguments
        // passed through the overflow area will be stepped over by va_start,
        // so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
            A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory: {
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
          break;
        }
        }
        // Take fixed arguments into account for GpOffset and FpOffset,
        // but don't actually store shadows for them.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          MS.PtrTy);
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr,
                         Alignment, VAArgOverflowSize);
      }
    }
  }
};
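/// AArch64-specific implementation of VarArgHelper.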
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of AArch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    LLVM_DEBUG(dbgs() << "Unknown vararg type: " << *T << "\n");
    return {AK_Memory, 0};
  }
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory: {
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
        break;
      }
      }
      // Count Gp/Vr fixed arguments to their respective offsets, but don't
      // bother to actually store a shadow.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        MS.PtrTy);
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        MS.PtrTy);
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);

      // The variadic ABI for AArch64 creates two areas to save the incoming
      // argument registers, one for 64-bit general registers x0-x7 and
      // another for 128-bit FP/SIMD v0-v7. We need to propagate the shadow
      // arguments to both regions 'va::__gr_top + va::__gr_offs' and
      // 'va::__vr_top + va::__vr_offs'. The remaining arguments are saved on
      // shadow for 'va::stack'.
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      // 64-bit general register area starts at __gr_top + __gr_offs.
      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      // 128-bit FP/SIMD register area starts at __vr_top + __vr_offs.
      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // Since __gr_off is defined as '0 - ((8 - named_gr) * 8)', skip the
      // bytes of shadow that correspond to named arguments.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);

      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;

      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);

      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;

      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally for remaining arguments.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;

      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));

      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
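/// PowerPC-specific implementation of VarArgHelper.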
struct VarArgPowerPCHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPCHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // For PowerPC, we need to deal with alignment of stack arguments -
    // they are mostly aligned to 8 bytes, but vectors and i128 arrays
    // are aligned to 16 bytes, byval arguments to their natural alignment.
    Triple TargetTriple(F.getParent()->getTargetTriple());
    // Parameter save area starts at 48 bytes from frame pointer for ABIv1,
    // and 32 bytes for ABIv2.
    unsigned VAArgBase;
    if (TargetTriple.isPPC64()) {
      if (TargetTriple.isPPC64ELFv2ABI())
        VAArgBase = 32;
      else
        VAArgBase = 48;
    } else {
      // Parameter save area is 8 bytes from frame pointer in PPC32.
      VAArgBase = 8;
    }
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < 8 to match the
          // placement of bits in big endian system.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    Triple TargetTriple(F.getParent()->getTargetTriple());
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);

      // In PPC32 va_list_tag is a struct, whereas in PPC64 it's a pointer.
      if (!TargetTriple.isPPC64()) {
        RegSaveAreaPtrPtr =
            IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      }
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);

      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
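/// SystemZ-specific implementation of VarArgHelper.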
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  ArgKind classifyArgument(Type *T) {
    // T is a SystemZABIInfo::classifyArgumentType() output, so there are only
    // a few possibilities of what it can be. In particular, enums, single
    // element structs and large types have already been taken care of.

    // Some i128 and fp128 arguments are converted to pointers only in the
    // back end.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // ABI says: "One of the simple integer types no more than 64 bits wide.
    // ... If such an argument is shorter than 64 bits, replace it by a full
    // 64-bit integer representing the same number, using sign or zero
    // extension". Shadow for an integer argument has the same type as the
    // argument itself, so it can be sign or zero extended as well.
    bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
    bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
    if (ZExt) {
      assert(!SExt);
      return ShadowExtension::Zero;
    }
    if (SExt) {
      assert(!ZExt);
      return ShadowExtension::Sign;
    }
    return ShadowExtension::None;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      // SystemZABIInfo does not produce ByVal parameters.
      assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        // Indirect arguments are instrumented as GeneralPurpose arguments,
        // pointing to the copy of the original value.
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            // PoP says: "A short floating-point datum requires only the
            // left-most 32 bit positions of a floating-point register", so
            // don't extend shadow and don't mind the gap.
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex. No need to store shadow, since vector
        // varargs go through ArgKind::Memory.
        assert(IsFixed);
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset, but store shadow only for varargs.
        // Ignore fixed args, since we need to copy only the vararg portion
        // of the overflow area shadow.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions, it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
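/// i386-specific implementation of VarArgHelper.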
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < IntptrSize to match
          // the placement of bits in big endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, IntptrSize);
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(RegSaveAreaPtrTy, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
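/// Generic implementation of VarArgHelper for architectures that pass all
/// varargs on the stack.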
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjusting the shadow for argument with size < IntptrSize to match
        // the placement of bits in big endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(RegSaveAreaPtrTy, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
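// ARM32, Loongarch64, MIPS and RISCV share the same calling conventions
// regarding VAArgs.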
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;

/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  // On PowerPC32 VAListTag is a struct {char, char, i16, char *, char *}.
  if (TargetTriple.isPPC32())
    return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/12);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (!CompileKernel && F.getName() == kMsanModuleCtorName)
    return false;
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out memory attributes.
  AttributeMask B;
  B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
  F.removeFnAttrs(B);

  return Visitor.runOnFunction();
}
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isStore(int Opcode)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static const size_t kNumberOfAccessSizes
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
static bool isAMustTailRetVal(Value *RetVal)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Module.h This file contains the declarations for the Module class.
static const MemoryMapParams Linux_LoongArch64_MemoryMapParams
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams
static const Align kMinOriginAlignment
static const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< uint64_t > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
static cl::opt< uint64_t > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
static const unsigned kOriginSize
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
static const Align kShadowTLSAlignment
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
static const MemoryMapParams Linux_S390X_MemoryMapParams
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_I386_MemoryMapParams
const char kMsanInitName[]
static cl::opt< bool > ClPrintStackNames("msan-print-stack-names", cl::desc("Print name of local stack variable"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< uint64_t > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleLifetimeIntrinsics("msan-handle-lifetime-intrinsics", cl::desc("when possible, poison scoped variables at the beginning of the scope " "(slower, but more precise)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
static GlobalVariable * createPrivateConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClEagerChecks("msan-eager-checks", cl::desc("check arguments and return values at function call boundaries"), cl::Hidden, cl::init(false))
static cl::opt< int > ClDisambiguateWarning("msan-disambiguate-warning-threshold", cl::desc("Define threshold for number of checks per " "debug location to force origin update."), cl::Hidden, cl::init(3))
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
static cl::opt< uint64_t > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams
static const unsigned kParamTLSSize
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
static const unsigned kRetvalTLSSize
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams
const char kMsanModuleCtorName[]
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDisableChecks("msan-disable-checks", cl::desc("Apply no_sanitize to the whole file"), cl::Hidden, cl::init(false))
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
an instruction to allocate memory on the stack
void setAlignment(Align Align)
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
This class represents a no-op cast from one type to another.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_SGT
signed greater than
@ ICMP_SGE
signed greater or equal
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static bool shouldExecute(unsigned CounterName)
This instruction compares its operands according to the predicate given to the constructor.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setComdat(Comdat *C)
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ ExternalLinkage
Externally visible function.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
std::vector< ConstraintInfo > ConstraintInfoVector
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field of array element value into an aggregate value.
Base class for instruction visitors.
void visit(Iterator Start, Iterator End)
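A bare-bones subclass shows the CRTP dispatch: each instruction in the visited range is routed to the most specific visit overload the subclass defines. The counter below is purely illustrative:

#include "llvm/IR/InstVisitor.h"

using namespace llvm;

// Illustrative visitor: InstVisitor dispatches every LoadInst it encounters
// to visitLoadInst; instruction kinds without an overload fall through to
// no-op defaults.
struct LoadCounter : public InstVisitor<LoadCounter> {
  unsigned NumLoads = 0;
  void visitLoadInst(LoadInst &LI) { ++NumLoads; }
};
// Usage: LoadCounter LC; LC.visit(BB.begin(), BB.end());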
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This class represents a cast from an integer to a pointer.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handling.
An instruction for reading from memory.
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
This class wraps the llvm.memcpy intrinsic.
This class wraps the llvm.memmove intrinsic.
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior when invalid operands are used, and instead produce a poison value.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
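The usual protocol in a pass's run method, sketched here with an invented no-op pass (PassInfoMixin is the CRTP mix-in listed at the end of this index):

#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Invented example pass: return all() when the IR was left untouched and
// none() after a transformation, so downstream analyses are invalidated.
struct ExampleNoOpPass : PassInfoMixin<ExampleNoOpPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
    bool Changed = false; // a real pass would mutate M and set this
    return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
  }
};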
This class represents a cast from a pointer to an integer.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
bool remove(const value_type &X)
Remove an item from the set vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
A SetVector that performs no allocations if smaller than a certain size.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
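For instance, a literal struct type can be assembled directly from element types; the field layout below is invented for illustration:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Invented layout: a two-field literal struct { i64, ptr } in address space 0.
static StructType *makePairTy(LLVMContext &C) {
  return StructType::get(C, {Type::getInt64Ty(C), PointerType::get(C, 0)});
}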
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Triple - Helper class for working with autoconf configuration names.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
bool isRISCV32() const
Tests whether the target is 32-bit RISC-V.
bool isPPC32() const
Tests whether the target is 32-bit PowerPC (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isPPC64() const
Tests whether the target is 64-bit PowerPC (little and big endian).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
bool isSystemZ() const
Tests whether the target is SystemZ.
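These predicates are what per-target dispatch hangs off. A hedged sketch of that shape, with placeholder constants rather than the pass's real memory-map parameters:

#include <cstdint>

#include "llvm/TargetParser/Triple.h"

using namespace llvm;

// Placeholder values only: select a per-target constant from Triple queries.
static uint64_t exampleAndMask(const Triple &TargetTriple) {
  if (TargetTriple.isAArch64())
    return 0x0B00FFFFFFFFFFFFULL; // invented value
  if (TargetTriple.isSystemZ())
    return 0x1C00000000000000ULL; // invented value
  return 0; // targets not covered by this sketch
}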
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
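These type queries combine into the classic "same-width integer type" idiom; a sketch with an invented helper name:

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Invented helper: map a sized scalar or vector type to an integer type (or
// vector of integers) of the same bit width; returns nullptr when unsized.
static Type *sameWidthIntTy(Type *OrigTy) {
  if (auto *VT = dyn_cast<VectorType>(OrigTy))
    return VectorType::get(
        IntegerType::get(VT->getContext(), VT->getScalarSizeInBits()),
        VT->getElementCount());
  TypeSize Size = OrigTy->getPrimitiveSizeInBits();
  if (Size.isScalable() || Size.getFixedValue() == 0)
    return nullptr;
  return IntegerType::get(OrigTy->getContext(), Size.getFixedValue());
}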
'undef' values are things that do not have specified contents.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This represents the llvm.va_copy intrinsic.
This represents the llvm.va_start intrinsic.
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
StringRef getName() const
Return a constant reference to the value's name.
Type * getElementType() const
This class represents zero extension of integer types.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
initializer< Ty > init(const Ty &Val)
Function * Kernel
Summary of a kernel (=entry point for target offloading).
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
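Log2_32_Ceil rounds up: Log2_32_Ceil(4) == 2 while Log2_32_Ceil(5) == 3, since 8 is the smallest power of two covering 5. A tiny invented use:

#include <cstdint>

#include "llvm/Support/MathExtras.h"

// Invented helper: power-of-two bucket index for a byte count.
static unsigned sizeClassFor(uint32_t Bytes) {
  return llvm::Log2_32_Ceil(Bytes); // e.g. 5 bytes -> class 3 (8-byte bucket)
}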
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns the unique alloca where the value comes from, or nullptr.
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Or
Bitwise or logical OR of integers.
std::pair< Instruction *, Value * > SplitBlockAndInsertSimpleForLoop(Value *End, Instruction *SplitBefore)
Insert a for (int i = 0; i < End; i++) loop structure (with the exception that End is assumed > 0, and thus not checked on entry) at SplitBefore.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
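alignTo rounds up to the next multiple of the alignment, e.g. alignTo(10, Align(8)) == 16, while already-aligned sizes pass through unchanged. A one-line invented use:

#include <cstdint>

#include "llvm/Support/Alignment.h"

// Invented helper: pad a byte size out to an 8-byte boundary.
static uint64_t padTo8(uint64_t Size) {
  return llvm::alignTo(Size, llvm::Align(8));
}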
constexpr unsigned BitWidth
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
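The two ModuleUtils entries above are typically used together: create the ctor lazily, then register it with the global ctor list. A sketch with placeholder symbol names:

#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

// Placeholder names throughout: build a module ctor that calls an init
// function, then append it to llvm.global_ctors at the default priority.
static void installExampleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, "example.module_ctor", "__example_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{},
      [&](Function *Ctor, FunctionCallee) {
        appendToGlobalCtors(M, Ctor, /*Priority=*/0);
      });
}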
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
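Paired with createUnlikelyBranchWeights above, this is the standard way to emit a cold guarded slow path; the helper below is invented for illustration:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

// Invented helper: branch to a new cold block when Cond holds, and point the
// builder there so follow-up instructions land on the slow path.
static void enterColdPath(IRBuilder<> &IRB, Value *Cond) {
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cond, IRB.GetInsertPoint(), /*Unreachable=*/false,
      MDBuilder(IRB.getContext()).createUnlikelyBranchWeights());
  IRB.SetInsertPoint(ThenTerm);
}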
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A CRTP mix-in to automatically provide informational APIs needed for passes.