184#include "llvm/IR/IntrinsicsAArch64.h"
185#include "llvm/IR/IntrinsicsX86.h"
216#define DEBUG_TYPE "msan"
219 "Controls which checks to insert");
222 "Controls which instruction to instrument");
240 "msan-track-origins",
245 cl::desc(
"keep going after reporting a UMR"),
254 "msan-poison-stack-with-call",
259 "msan-poison-stack-pattern",
260 cl::desc(
"poison uninitialized stack variables with the given pattern"),
265 cl::desc(
"Print name of local stack variable"),
270 cl::desc(
"Poison fully undef temporary values. "
271 "Partially undefined constant vectors "
272 "are unaffected by this flag (see "
273 "-msan-poison-undef-vectors)."),
277 "msan-poison-undef-vectors",
278 cl::desc(
"Precisely poison partially undefined constant vectors. "
279 "If false (legacy behavior), the entire vector is "
280 "considered fully initialized, which may lead to false "
281 "negatives. Fully undefined constant vectors are "
282 "unaffected by this flag (see -msan-poison-undef)."),
286 "msan-precise-disjoint-or",
287 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
288 "disjointedness is ignored (i.e., 1|1 is initialized)."),
293 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
298 cl::desc(
"exact handling of relational integer ICmp"),
302 "msan-handle-lifetime-intrinsics",
304 "when possible, poison scoped variables at the beginning of the scope "
305 "(slower, but more precise)"),
316 "msan-handle-asm-conservative",
327 "msan-check-access-address",
328 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
333 cl::desc(
"check arguments and return values at function call boundaries"),
337 "msan-dump-strict-instructions",
338 cl::desc(
"print out instructions with default strict semantics i.e.,"
339 "check that all the inputs are fully initialized, and mark "
340 "the output as fully initialized. These semantics are applied "
341 "to instructions that could not be handled explicitly nor "
350 "msan-dump-heuristic-instructions",
351 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
352 "Use -msan-dump-strict-instructions to print instructions that "
353 "could not be handled explicitly nor heuristically."),
357 "msan-instrumentation-with-call-threshold",
359 "If the function being instrumented requires more than "
360 "this number of checks and origin stores, use callbacks instead of "
361 "inline checks (-1 means never use callbacks)."),
366 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
376 cl::desc(
"Insert checks for constant shadow values"),
383 cl::desc(
"Place MSan constructors in comdat sections"),
389 cl::desc(
"Define custom MSan AndMask"),
393 cl::desc(
"Define custom MSan XorMask"),
397 cl::desc(
"Define custom MSan ShadowBase"),
401 cl::desc(
"Define custom MSan OriginBase"),
406 cl::desc(
"Define threshold for number of checks per "
407 "debug location to force origin update."),
419struct MemoryMapParams {
426struct PlatformMemoryMapParams {
427 const MemoryMapParams *bits32;
428 const MemoryMapParams *bits64;
590class MemorySanitizer {
599 MemorySanitizer(MemorySanitizer &&) =
delete;
600 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
601 MemorySanitizer(
const MemorySanitizer &) =
delete;
602 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
604 bool sanitizeFunction(Function &
F, TargetLibraryInfo &TLI);
607 friend struct MemorySanitizerVisitor;
608 friend struct VarArgHelperBase;
609 friend struct VarArgAMD64Helper;
610 friend struct VarArgAArch64Helper;
611 friend struct VarArgPowerPC64Helper;
612 friend struct VarArgPowerPC32Helper;
613 friend struct VarArgSystemZHelper;
614 friend struct VarArgI386Helper;
615 friend struct VarArgGenericHelper;
617 void initializeModule(
Module &M);
618 void initializeCallbacks(
Module &M,
const TargetLibraryInfo &TLI);
619 void createKernelApi(
Module &M,
const TargetLibraryInfo &TLI);
620 void createUserspaceApi(
Module &M,
const TargetLibraryInfo &TLI);
622 template <
typename... ArgsTy>
623 FunctionCallee getOrInsertMsanMetadataFunction(
Module &M, StringRef Name,
649 Value *ParamOriginTLS;
655 Value *RetvalOriginTLS;
661 Value *VAArgOriginTLS;
664 Value *VAArgOverflowSizeTLS;
667 bool CallbacksInitialized =
false;
670 FunctionCallee WarningFn;
674 FunctionCallee MaybeWarningVarSizeFn;
679 FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
681 FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
684 FunctionCallee MsanPoisonStackFn;
688 FunctionCallee MsanChainOriginFn;
691 FunctionCallee MsanSetOriginFn;
694 FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
697 StructType *MsanContextStateTy;
698 FunctionCallee MsanGetContextStateFn;
701 FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
707 FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
708 FunctionCallee MsanMetadataPtrForLoad_1_8[4];
709 FunctionCallee MsanMetadataPtrForStore_1_8[4];
710 FunctionCallee MsanInstrumentAsmStoreFn;
713 Value *MsanMetadataAlloca;
716 FunctionCallee getKmsanShadowOriginAccessFn(
bool isStore,
int size);
719 const MemoryMapParams *MapParams;
723 MemoryMapParams CustomMapParams;
725 MDNode *ColdCallWeights;
728 MDNode *OriginStoreWeights;
731void insertModuleCtor(
Module &M) {
768 if (!Options.Kernel) {
777 MemorySanitizer Msan(*
F.getParent(), Options);
796 OS, MapClassName2PassName);
802 if (Options.EagerChecks)
803 OS <<
"eager-checks;";
804 OS <<
"track-origins=" << Options.TrackOrigins;
820template <
typename... ArgsTy>
822MemorySanitizer::getOrInsertMsanMetadataFunction(
Module &M,
StringRef Name,
827 std::forward<ArgsTy>(Args)...);
830 return M.getOrInsertFunction(Name, MsanMetadata,
831 std::forward<ArgsTy>(Args)...);
840 RetvalOriginTLS =
nullptr;
842 ParamOriginTLS =
nullptr;
844 VAArgOriginTLS =
nullptr;
845 VAArgOverflowSizeTLS =
nullptr;
847 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
849 IRB.getVoidTy(), IRB.getInt32Ty());
860 MsanGetContextStateFn =
861 M.getOrInsertFunction(
"__msan_get_context_state", PtrTy);
865 for (
int ind = 0,
size = 1; ind < 4; ind++,
size <<= 1) {
866 std::string name_load =
867 "__msan_metadata_ptr_for_load_" + std::to_string(
size);
868 std::string name_store =
869 "__msan_metadata_ptr_for_store_" + std::to_string(
size);
870 MsanMetadataPtrForLoad_1_8[ind] =
871 getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
872 MsanMetadataPtrForStore_1_8[ind] =
873 getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
876 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
877 M,
"__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
878 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
879 M,
"__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);
882 MsanPoisonAllocaFn =
M.getOrInsertFunction(
883 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
884 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
885 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
889 return M.getOrInsertGlobal(Name, Ty, [&] {
891 nullptr, Name,
nullptr,
897void MemorySanitizer::createUserspaceApi(
Module &M,
905 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
906 :
"__msan_warning_with_origin_noreturn";
907 WarningFn =
M.getOrInsertFunction(WarningFnName,
909 IRB.getVoidTy(), IRB.getInt32Ty());
912 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
913 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
940 IRB.getIntPtrTy(
M.getDataLayout()));
944 unsigned AccessSize = 1 << AccessSizeIndex;
945 std::string FunctionName =
"__msan_maybe_warning_" +
itostr(AccessSize);
946 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
948 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
949 MaybeWarningVarSizeFn =
M.getOrInsertFunction(
950 "__msan_maybe_warning_N", TLI.
getAttrList(
C, {},
false),
951 IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
952 FunctionName =
"__msan_maybe_store_origin_" +
itostr(AccessSize);
953 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
955 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
959 MsanSetAllocaOriginWithDescriptionFn =
960 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
961 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
962 MsanSetAllocaOriginNoDescriptionFn =
963 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
964 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
965 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
966 IRB.getVoidTy(), PtrTy, IntptrTy);
970void MemorySanitizer::initializeCallbacks(
Module &M,
973 if (CallbacksInitialized)
979 MsanChainOriginFn =
M.getOrInsertFunction(
980 "__msan_chain_origin",
983 MsanSetOriginFn =
M.getOrInsertFunction(
985 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
987 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
989 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
990 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
992 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
994 MsanInstrumentAsmStoreFn =
M.getOrInsertFunction(
995 "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
998 createKernelApi(M, TLI);
1000 createUserspaceApi(M, TLI);
1002 CallbacksInitialized =
true;
1008 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
1026void MemorySanitizer::initializeModule(
Module &M) {
1027 auto &
DL =
M.getDataLayout();
1029 TargetTriple =
M.getTargetTriple();
1031 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
1032 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
1034 if (ShadowPassed || OriginPassed) {
1039 MapParams = &CustomMapParams;
1041 switch (TargetTriple.getOS()) {
1043 switch (TargetTriple.getArch()) {
1058 switch (TargetTriple.getArch()) {
1067 switch (TargetTriple.getArch()) {
1101 C = &(
M.getContext());
1103 IntptrTy = IRB.getIntPtrTy(
DL);
1104 OriginTy = IRB.getInt32Ty();
1105 PtrTy = IRB.getPtrTy();
1110 if (!CompileKernel) {
1112 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1113 return new GlobalVariable(
1114 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1115 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1119 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1120 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1121 GlobalValue::WeakODRLinkage,
1122 IRB.getInt32(Recover),
"__msan_keep_going");
1137struct VarArgHelper {
1138 virtual ~VarArgHelper() =
default;
1141 virtual void visitCallBase(CallBase &CB,
IRBuilder<> &IRB) = 0;
1144 virtual void visitVAStartInst(VAStartInst &
I) = 0;
1147 virtual void visitVACopyInst(VACopyInst &
I) = 0;
1153 virtual void finalizeInstrumentation() = 0;
1156struct MemorySanitizerVisitor;
1161 MemorySanitizerVisitor &Visitor);
1168 if (TypeSizeFixed <= 8)
1177class NextNodeIRBuilder :
public IRBuilder<> {
1190struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1192 MemorySanitizer &MS;
1194 ValueMap<Value *, Value *> ShadowMap, OriginMap;
1195 std::unique_ptr<VarArgHelper> VAHelper;
1196 const TargetLibraryInfo *TLI;
1203 bool PropagateShadow;
1206 bool PoisonUndefVectors;
1208 struct ShadowOriginAndInsertPoint {
1213 ShadowOriginAndInsertPoint(
Value *S,
Value *O, Instruction *
I)
1214 : Shadow(S), Origin(
O), OrigIns(
I) {}
1217 DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
1218 SmallSetVector<AllocaInst *, 16> AllocaSet;
1221 int64_t SplittableBlocksCount = 0;
1223 MemorySanitizerVisitor(Function &
F, MemorySanitizer &MS,
1224 const TargetLibraryInfo &TLI)
1226 bool SanitizeFunction =
1228 InsertChecks = SanitizeFunction;
1229 PropagateShadow = SanitizeFunction;
1240 MS.initializeCallbacks(*
F.getParent(), TLI);
1242 IRBuilder<>(&
F.getEntryBlock(),
F.getEntryBlock().getFirstNonPHIIt())
1243 .CreateIntrinsic(Intrinsic::donothing, {});
1245 if (MS.CompileKernel) {
1247 insertKmsanPrologue(IRB);
1251 <<
"MemorySanitizer is not inserting checks into '"
1252 <<
F.getName() <<
"'\n");
1255 bool instrumentWithCalls(
Value *V) {
1259 ++SplittableBlocksCount;
1264 bool isInPrologue(Instruction &
I) {
1265 return I.getParent() == FnPrologueEnd->
getParent() &&
1274 if (MS.TrackOrigins <= 1)
1276 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1280 const DataLayout &
DL =
F.getDataLayout();
1281 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1291 TypeSize TS, Align Alignment) {
1292 const DataLayout &
DL =
F.getDataLayout();
1293 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1294 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1306 auto [InsertPt,
Index] =
1318 Align CurrentAlignment = Alignment;
1319 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1320 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1322 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1327 CurrentAlignment = IntptrAlignment;
1340 Value *OriginPtr, Align Alignment) {
1341 const DataLayout &
DL =
F.getDataLayout();
1343 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
1345 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1354 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1361 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1363 if (instrumentWithCalls(ConvertedShadow) &&
1365 FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1366 Value *ConvertedShadow2 =
1368 CallBase *CB = IRB.
CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
1372 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1376 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1381 void materializeStores() {
1382 for (StoreInst *SI : StoreList) {
1384 Value *Val =
SI->getValueOperand();
1385 Value *Addr =
SI->getPointerOperand();
1386 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1387 Value *ShadowPtr, *OriginPtr;
1389 const Align Alignment =
SI->getAlign();
1391 std::tie(ShadowPtr, OriginPtr) =
1392 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
true);
1394 [[maybe_unused]] StoreInst *NewSI =
1401 if (MS.TrackOrigins && !
SI->isAtomic())
1402 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1409 if (MS.TrackOrigins < 2)
1412 if (LazyWarningDebugLocationCount.
empty())
1413 for (
const auto &
I : InstrumentationList)
1414 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1430 auto NewDebugLoc = OI->getDebugLoc();
1437 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1438 Origin = updateOrigin(Origin, IRBOrigin);
1443 if (MS.CompileKernel || MS.TrackOrigins)
1454 const DataLayout &
DL =
F.getDataLayout();
1455 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1457 if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
1459 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1460 Value *ConvertedShadow2 =
1464 FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1468 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1472 FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
1475 unsigned ShadowSize =
DL.getTypeAllocSize(ConvertedShadow2->
getType());
1478 {ShadowAlloca, ConstantInt::get(IRB.
getInt64Ty(), ShadowSize),
1479 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1484 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1487 !MS.Recover, MS.ColdCallWeights);
1490 insertWarningFn(IRB, Origin);
1495 void materializeInstructionChecks(
1497 const DataLayout &
DL =
F.getDataLayout();
1500 bool Combine = !MS.TrackOrigins;
1502 Value *Shadow =
nullptr;
1503 for (
const auto &ShadowData : InstructionChecks) {
1504 assert(ShadowData.OrigIns == Instruction);
1507 Value *ConvertedShadow = ShadowData.Shadow;
1516 insertWarningFn(IRB, ShadowData.Origin);
1526 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1531 Shadow = ConvertedShadow;
1535 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1536 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1537 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1543 materializeOneCheck(IRB, Shadow,
nullptr);
1547 void materializeChecks() {
1550 SmallPtrSet<Instruction *, 16>
Done;
1553 for (
auto I = InstrumentationList.begin();
1554 I != InstrumentationList.end();) {
1555 auto OrigIns =
I->OrigIns;
1559 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1560 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1561 return OrigIns != R.OrigIns;
1575 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1576 {Zero, IRB.getInt32(0)},
"param_shadow");
1577 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1578 {Zero, IRB.getInt32(1)},
"retval_shadow");
1579 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1580 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1581 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1582 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1583 MS.VAArgOverflowSizeTLS =
1584 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1585 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1586 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1587 {Zero, IRB.getInt32(5)},
"param_origin");
1588 MS.RetvalOriginTLS =
1589 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1590 {Zero, IRB.getInt32(6)},
"retval_origin");
1592 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1605 for (Instruction *
I : Instructions)
1609 for (PHINode *PN : ShadowPHINodes) {
1611 PHINode *PNO = MS.TrackOrigins ?
cast<PHINode>(getOrigin(PN)) : nullptr;
1612 size_t NumValues = PN->getNumIncomingValues();
1613 for (
size_t v = 0;
v < NumValues;
v++) {
1614 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1616 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1620 VAHelper->finalizeInstrumentation();
1625 for (
auto Item : LifetimeStartList) {
1626 instrumentAlloca(*Item.second, Item.first);
1627 AllocaSet.
remove(Item.second);
1632 for (AllocaInst *AI : AllocaSet)
1633 instrumentAlloca(*AI);
1636 materializeChecks();
1640 materializeStores();
1646 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1657 const DataLayout &
DL =
F.getDataLayout();
1659 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1661 VT->getElementCount());
1664 return ArrayType::get(getShadowTy(AT->getElementType()),
1665 AT->getNumElements());
1669 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1670 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1672 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1675 uint32_t TypeSize =
DL.getTypeSizeInBits(OrigTy);
1685 for (
unsigned Idx = 0; Idx <
Struct->getNumElements(); Idx++) {
1688 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1690 if (Aggregator != FalseVal)
1691 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1693 Aggregator = ShadowBool;
1700 Value *collapseArrayShadow(ArrayType *Array,
Value *Shadow,
1702 if (!
Array->getNumElements())
1706 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1708 for (
unsigned Idx = 1; Idx <
Array->getNumElements(); Idx++) {
1710 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1711 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1721 return collapseStructShadow(
Struct, V, IRB);
1723 return collapseArrayShadow(Array, V, IRB);
1728 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1736 Type *VTy =
V->getType();
1738 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1745 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1747 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1748 VectTy->getElementCount());
1754 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1756 return VectorType::get(
1757 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1758 VectTy->getElementCount());
1760 assert(IntPtrTy == MS.IntptrTy);
1767 VectTy->getElementCount(),
1768 constToIntPtr(VectTy->getElementType(),
C));
1770 assert(IntPtrTy == MS.IntptrTy);
1771 return ConstantInt::get(MS.IntptrTy,
C);
1784 Type *IntptrTy = ptrToIntPtrType(Addr->
getType());
1787 if (uint64_t AndMask = MS.MapParams->AndMask)
1788 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1790 if (uint64_t XorMask = MS.MapParams->XorMask)
1791 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1803 std::pair<Value *, Value *>
1805 MaybeAlign Alignment) {
1810 assert(VectTy->getElementType()->isPointerTy());
1812 Type *IntptrTy = ptrToIntPtrType(Addr->
getType());
1813 Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1814 Value *ShadowLong = ShadowOffset;
1815 if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1817 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1820 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1822 Value *OriginPtr =
nullptr;
1823 if (MS.TrackOrigins) {
1824 Value *OriginLong = ShadowOffset;
1825 uint64_t OriginBase = MS.MapParams->OriginBase;
1826 if (OriginBase != 0)
1828 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1831 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1834 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1836 return std::make_pair(ShadowPtr, OriginPtr);
1839 template <
typename... ArgsTy>
1844 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1845 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1848 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1851 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *Addr,
1855 Value *ShadowOriginPtrs;
1856 const DataLayout &
DL =
F.getDataLayout();
1857 TypeSize
Size =
DL.getTypeStoreSize(ShadowTy);
1859 FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(
isStore,
Size);
1862 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1864 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1865 ShadowOriginPtrs = createMetadataCall(
1867 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1874 return std::make_pair(ShadowPtr, OriginPtr);
1880 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *Addr,
1887 return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy,
isStore);
1892 Value *ShadowPtrs = ConstantInt::getNullValue(
1894 Value *OriginPtrs =
nullptr;
1895 if (MS.TrackOrigins)
1896 OriginPtrs = ConstantInt::getNullValue(
1898 for (
unsigned i = 0; i < NumElements; ++i) {
1901 auto [ShadowPtr, OriginPtr] =
1902 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1905 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1906 if (MS.TrackOrigins)
1908 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1910 return {ShadowPtrs, OriginPtrs};
1913 std::pair<Value *, Value *> getShadowOriginPtr(
Value *Addr,
IRBuilder<> &IRB,
1915 MaybeAlign Alignment,
1917 if (MS.CompileKernel)
1918 return getShadowOriginPtrKernel(Addr, IRB, ShadowTy,
isStore);
1919 return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1927 ConstantInt::get(MS.IntptrTy, ArgOffset),
"_msarg");
1932 if (!MS.TrackOrigins)
1935 ConstantInt::get(MS.IntptrTy, ArgOffset),
1945 Value *getOriginPtrForRetval() {
1947 return MS.RetvalOriginTLS;
1952 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1953 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1958 if (!MS.TrackOrigins)
1960 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1961 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1962 OriginMap[
V] = Origin;
1966 Type *ShadowTy = getShadowTy(OrigTy);
1976 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
1985 getPoisonedShadow(AT->getElementType()));
1990 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1991 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1999 Type *ShadowTy = getShadowTy(V);
2002 return getPoisonedShadow(ShadowTy);
2014 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
2015 return getCleanShadow(V);
2017 Value *Shadow = ShadowMap[
V];
2019 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
2020 assert(Shadow &&
"No shadow for a value");
2027 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
2028 : getCleanShadow(V);
2034 Value *&ShadowPtr = ShadowMap[
V];
2039 unsigned ArgOffset = 0;
2040 const DataLayout &
DL =
F->getDataLayout();
2041 for (
auto &FArg :
F->args()) {
2042 if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
2044 ?
"vscale not fully supported\n"
2045 :
"Arg is not sized\n"));
2047 ShadowPtr = getCleanShadow(V);
2048 setOrigin(
A, getCleanOrigin());
2054 unsigned Size = FArg.hasByValAttr()
2055 ?
DL.getTypeAllocSize(FArg.getParamByValType())
2056 :
DL.getTypeAllocSize(FArg.getType());
2060 if (FArg.hasByValAttr()) {
2064 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
2065 FArg.getParamAlign(), FArg.getParamByValType());
2066 Value *CpShadowPtr, *CpOriginPtr;
2067 std::tie(CpShadowPtr, CpOriginPtr) =
2068 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2070 if (!PropagateShadow || Overflow) {
2072 EntryIRB.CreateMemSet(
2076 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2078 [[maybe_unused]]
Value *Cpy = EntryIRB.CreateMemCpy(
2079 CpShadowPtr, CopyAlign,
Base, CopyAlign,
Size);
2082 if (MS.TrackOrigins) {
2083 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2087 EntryIRB.CreateMemCpy(
2096 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2097 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2098 ShadowPtr = getCleanShadow(V);
2099 setOrigin(
A, getCleanOrigin());
2102 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2103 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2105 if (MS.TrackOrigins) {
2106 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2107 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2111 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2117 assert(ShadowPtr &&
"Could not find shadow for an argument");
2124 cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
2125 PoisonUndefVectors) {
2128 for (
unsigned i = 0; i != NumElems; ++i) {
2131 : getCleanShadow(Elem);
2135 LLVM_DEBUG(
dbgs() <<
"Partial undef constant vector: " << *V <<
" ==> "
2136 << *ShadowConstant <<
"\n");
2138 return ShadowConstant;
2144 return getCleanShadow(V);
2148 Value *getShadow(Instruction *
I,
int i) {
2149 return getShadow(
I->getOperand(i));
2154 if (!MS.TrackOrigins)
2157 return getCleanOrigin();
2159 "Unexpected value type in getOrigin()");
2161 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2162 return getCleanOrigin();
2164 Value *Origin = OriginMap[
V];
2165 assert(Origin &&
"Missing origin");
2170 Value *getOrigin(Instruction *
I,
int i) {
2171 return getOrigin(
I->getOperand(i));
2178 void insertCheckShadow(
Value *Shadow,
Value *Origin, Instruction *OrigIns) {
2184 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2185 << *OrigIns <<
"\n");
2192 "Can only insert checks for integer, vector, and aggregate shadow "
2195 InstrumentationList.push_back(
2196 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2204 void insertCheckShadowOf(
Value *Val, Instruction *OrigIns) {
2206 Value *Shadow, *Origin;
2208 Shadow = getShadow(Val);
2211 Origin = getOrigin(Val);
2218 insertCheckShadow(Shadow, Origin, OrigIns);
2223 case AtomicOrdering::NotAtomic:
2224 return AtomicOrdering::NotAtomic;
2225 case AtomicOrdering::Unordered:
2226 case AtomicOrdering::Monotonic:
2227 case AtomicOrdering::Release:
2228 return AtomicOrdering::Release;
2229 case AtomicOrdering::Acquire:
2230 case AtomicOrdering::AcquireRelease:
2231 return AtomicOrdering::AcquireRelease;
2232 case AtomicOrdering::SequentiallyConsistent:
2233 return AtomicOrdering::SequentiallyConsistent;
2239 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2240 uint32_t OrderingTable[NumOrderings] = {};
2242 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2243 OrderingTable[(
int)AtomicOrderingCABI::release] =
2244 (int)AtomicOrderingCABI::release;
2245 OrderingTable[(int)AtomicOrderingCABI::consume] =
2246 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2247 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2248 (
int)AtomicOrderingCABI::acq_rel;
2249 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2250 (
int)AtomicOrderingCABI::seq_cst;
2257 case AtomicOrdering::NotAtomic:
2258 return AtomicOrdering::NotAtomic;
2259 case AtomicOrdering::Unordered:
2260 case AtomicOrdering::Monotonic:
2261 case AtomicOrdering::Acquire:
2262 return AtomicOrdering::Acquire;
2263 case AtomicOrdering::Release:
2264 case AtomicOrdering::AcquireRelease:
2265 return AtomicOrdering::AcquireRelease;
2266 case AtomicOrdering::SequentiallyConsistent:
2267 return AtomicOrdering::SequentiallyConsistent;
2273 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2274 uint32_t OrderingTable[NumOrderings] = {};
2276 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2277 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2278 OrderingTable[(int)AtomicOrderingCABI::consume] =
2279 (
int)AtomicOrderingCABI::acquire;
2280 OrderingTable[(int)AtomicOrderingCABI::release] =
2281 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2282 (int)AtomicOrderingCABI::acq_rel;
2283 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2284 (
int)AtomicOrderingCABI::seq_cst;
2290 using InstVisitor<MemorySanitizerVisitor>
::visit;
2291 void visit(Instruction &
I) {
2292 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2295 if (isInPrologue(
I))
2300 setShadow(&
I, getCleanShadow(&
I));
2301 setOrigin(&
I, getCleanOrigin());
2312 void visitLoadInst(LoadInst &
I) {
2313 assert(
I.getType()->isSized() &&
"Load type must have size");
2314 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2315 NextNodeIRBuilder IRB(&
I);
2316 Type *ShadowTy = getShadowTy(&
I);
2317 Value *Addr =
I.getPointerOperand();
2318 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2319 const Align Alignment =
I.getAlign();
2320 if (PropagateShadow) {
2321 std::tie(ShadowPtr, OriginPtr) =
2322 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
2326 setShadow(&
I, getCleanShadow(&
I));
2330 insertCheckShadowOf(
I.getPointerOperand(), &
I);
2335 if (MS.TrackOrigins) {
2336 if (PropagateShadow) {
2341 setOrigin(&
I, getCleanOrigin());
2350 void visitStoreInst(StoreInst &
I) {
2351 StoreList.push_back(&
I);
2353 insertCheckShadowOf(
I.getPointerOperand(), &
I);
2356 void handleCASOrRMW(Instruction &
I) {
2360 Value *Addr =
I.getOperand(0);
2361 Value *Val =
I.getOperand(1);
2362 Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val),
Align(1),
2367 insertCheckShadowOf(Addr, &
I);
2373 insertCheckShadowOf(Val, &
I);
2377 setShadow(&
I, getCleanShadow(&
I));
2378 setOrigin(&
I, getCleanOrigin());
2381 void visitAtomicRMWInst(AtomicRMWInst &
I) {
2386 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2392 void visitExtractElementInst(ExtractElementInst &
I) {
2393 insertCheckShadowOf(
I.getOperand(1), &
I);
2397 setOrigin(&
I, getOrigin(&
I, 0));
2400 void visitInsertElementInst(InsertElementInst &
I) {
2401 insertCheckShadowOf(
I.getOperand(2), &
I);
2403 auto *Shadow0 = getShadow(&
I, 0);
2404 auto *Shadow1 = getShadow(&
I, 1);
2407 setOriginForNaryOp(
I);
2410 void visitShuffleVectorInst(ShuffleVectorInst &
I) {
2412 auto *Shadow0 = getShadow(&
I, 0);
2413 auto *Shadow1 = getShadow(&
I, 1);
2416 setOriginForNaryOp(
I);
2420 void visitSExtInst(SExtInst &
I) {
2422 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2423 setOrigin(&
I, getOrigin(&
I, 0));
2426 void visitZExtInst(ZExtInst &
I) {
2428 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2429 setOrigin(&
I, getOrigin(&
I, 0));
2432 void visitTruncInst(TruncInst &
I) {
2434 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2435 setOrigin(&
I, getOrigin(&
I, 0));
2438 void visitBitCastInst(BitCastInst &
I) {
2443 if (CI->isMustTailCall())
2447 setOrigin(&
I, getOrigin(&
I, 0));
2450 void visitPtrToIntInst(PtrToIntInst &
I) {
2453 "_msprop_ptrtoint"));
2454 setOrigin(&
I, getOrigin(&
I, 0));
2457 void visitIntToPtrInst(IntToPtrInst &
I) {
2460 "_msprop_inttoptr"));
2461 setOrigin(&
I, getOrigin(&
I, 0));
2464 void visitFPToSIInst(CastInst &
I) { handleShadowOr(
I); }
2465 void visitFPToUIInst(CastInst &
I) { handleShadowOr(
I); }
2466 void visitSIToFPInst(CastInst &
I) { handleShadowOr(
I); }
2467 void visitUIToFPInst(CastInst &
I) { handleShadowOr(
I); }
2468 void visitFPExtInst(CastInst &
I) { handleShadowOr(
I); }
2469 void visitFPTruncInst(CastInst &
I) { handleShadowOr(
I); }
2476 void visitAnd(BinaryOperator &
I) {
2484 Value *S2 = getShadow(&
I, 1);
2485 Value *V1 =
I.getOperand(0);
2486 Value *V2 =
I.getOperand(1);
2494 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2495 setOriginForNaryOp(
I);
2498 void visitOr(BinaryOperator &
I) {
2511 Value *S2 = getShadow(&
I, 1);
2512 Value *V1 =
I.getOperand(0);
2513 Value *V2 =
I.getOperand(1);
2532 S = IRB.
CreateOr(S, DisjointOrShadow,
"_ms_disjoint");
2536 setOriginForNaryOp(
I);
2554 template <
bool CombineShadow>
class Combiner {
2555 Value *Shadow =
nullptr;
2556 Value *Origin =
nullptr;
2558 MemorySanitizerVisitor *MSV;
2561 Combiner(MemorySanitizerVisitor *MSV,
IRBuilder<> &IRB)
2562 : IRB(IRB), MSV(MSV) {}
2566 if (CombineShadow) {
2571 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2572 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2576 if (MSV->MS.TrackOrigins) {
2583 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2584 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2594 Value *OpShadow = MSV->getShadow(V);
2595 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2596 return Add(OpShadow, OpOrigin);
2601 void Done(Instruction *
I) {
2602 if (CombineShadow) {
2604 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2605 MSV->setShadow(
I, Shadow);
2607 if (MSV->MS.TrackOrigins) {
2609 MSV->setOrigin(
I, Origin);
2615 void DoneAndStoreOrigin(TypeSize TS,
Value *OriginPtr) {
2616 if (MSV->MS.TrackOrigins) {
2623 using ShadowAndOriginCombiner = Combiner<true>;
2624 using OriginCombiner = Combiner<false>;
2627 void setOriginForNaryOp(Instruction &
I) {
2628 if (!MS.TrackOrigins)
2631 OriginCombiner
OC(
this, IRB);
2632 for (Use &
Op :
I.operands())
2637 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2639 "Vector of pointers is not a valid shadow type");
2649 Type *srcTy =
V->getType();
2652 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2653 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2654 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2672 Type *ShadowTy = getShadowTy(V);
2673 if (
V->getType() == ShadowTy)
2675 if (
V->getType()->isPtrOrPtrVectorTy())
2682 void handleShadowOr(Instruction &
I) {
2684 ShadowAndOriginCombiner SC(
this, IRB);
2685 for (Use &
Op :
I.operands())
2702 Value *horizontalReduce(IntrinsicInst &
I,
unsigned ReductionFactor,
2705 unsigned TotalNumElems =
2710 TotalNumElems = TotalNumElems * 2;
2713 assert(TotalNumElems % ReductionFactor == 0);
2718 for (
unsigned i = 0; i < ReductionFactor; i++) {
2719 SmallVector<int, 16>
Mask;
2720 for (
unsigned X = 0;
X < TotalNumElems;
X += ReductionFactor)
2721 Mask.push_back(
X + i);
2743 void handlePairwiseShadowOrIntrinsic(IntrinsicInst &
I) {
2744 assert(
I.arg_size() == 1 ||
I.arg_size() == 2);
2746 assert(
I.getType()->isVectorTy());
2747 assert(
I.getArgOperand(0)->getType()->isVectorTy());
2749 [[maybe_unused]] FixedVectorType *ParamType =
2753 [[maybe_unused]] FixedVectorType *
ReturnType =
2761 Value *FirstArgShadow = getShadow(&
I, 0);
2762 Value *SecondArgShadow =
nullptr;
2763 if (
I.arg_size() == 2)
2764 SecondArgShadow = getShadow(&
I, 1);
2766 Value *OrShadow = horizontalReduce(
I, 2, FirstArgShadow,
2769 OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&
I));
2771 setShadow(&
I, OrShadow);
2772 setOriginForNaryOp(
I);
2782 void handlePairwiseShadowOrIntrinsic(IntrinsicInst &
I,
2783 int ReinterpretElemWidth) {
2784 assert(
I.arg_size() == 1 ||
I.arg_size() == 2);
2786 assert(
I.getType()->isVectorTy());
2787 assert(
I.getArgOperand(0)->getType()->isVectorTy());
2789 FixedVectorType *ParamType =
2794 [[maybe_unused]] FixedVectorType *
ReturnType =
2801 FixedVectorType *ReinterpretShadowTy =
nullptr;
2809 Value *FirstArgShadow = getShadow(&
I, 0);
2810 FirstArgShadow = IRB.
CreateBitCast(FirstArgShadow, ReinterpretShadowTy);
2820 Value *SecondArgShadow =
nullptr;
2821 if (
I.arg_size() == 2) {
2822 SecondArgShadow = getShadow(&
I, 1);
2823 SecondArgShadow = IRB.
CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
2826 Value *OrShadow = horizontalReduce(
I, 2, FirstArgShadow,
2829 OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&
I));
2831 setShadow(&
I, OrShadow);
2832 setOriginForNaryOp(
I);
2835 void visitFNeg(UnaryOperator &
I) { handleShadowOr(
I); }
2846 void handleMulByConstant(BinaryOperator &
I, Constant *ConstArg,
2852 Type *EltTy = VTy->getElementType();
2854 for (
unsigned Idx = 0; Idx < NumElements; ++Idx) {
2855 if (ConstantInt *Elt =
2857 const APInt &
V = Elt->getValue();
2858 APInt V2 = APInt(
V.getBitWidth(), 1) <<
V.countr_zero();
2859 Elements.push_back(ConstantInt::get(EltTy, V2));
2861 Elements.push_back(ConstantInt::get(EltTy, 1));
2867 const APInt &
V = Elt->getValue();
2868 APInt V2 = APInt(
V.getBitWidth(), 1) <<
V.countr_zero();
2869 ShadowMul = ConstantInt::get(Ty, V2);
2871 ShadowMul = ConstantInt::get(Ty, 1);
2877 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2878 setOrigin(&
I, getOrigin(OtherArg));
2881 void visitMul(BinaryOperator &
I) {
2884 if (constOp0 && !constOp1)
2885 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2886 else if (constOp1 && !constOp0)
2887 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2892 void visitFAdd(BinaryOperator &
I) { handleShadowOr(
I); }
2893 void visitFSub(BinaryOperator &
I) { handleShadowOr(
I); }
2894 void visitFMul(BinaryOperator &
I) { handleShadowOr(
I); }
2895 void visitAdd(BinaryOperator &
I) { handleShadowOr(
I); }
2896 void visitSub(BinaryOperator &
I) { handleShadowOr(
I); }
2897 void visitXor(BinaryOperator &
I) { handleShadowOr(
I); }
2899 void handleIntegerDiv(Instruction &
I) {
2902 insertCheckShadowOf(
I.getOperand(1), &
I);
2903 setShadow(&
I, getShadow(&
I, 0));
2904 setOrigin(&
I, getOrigin(&
I, 0));
2907 void visitUDiv(BinaryOperator &
I) { handleIntegerDiv(
I); }
2908 void visitSDiv(BinaryOperator &
I) { handleIntegerDiv(
I); }
2909 void visitURem(BinaryOperator &
I) { handleIntegerDiv(
I); }
2910 void visitSRem(BinaryOperator &
I) { handleIntegerDiv(
I); }
2914 void visitFDiv(BinaryOperator &
I) { handleShadowOr(
I); }
2915 void visitFRem(BinaryOperator &
I) { handleShadowOr(
I); }
2921 void handleEqualityComparison(ICmpInst &
I) {
2925 Value *Sa = getShadow(
A);
2926 Value *Sb = getShadow(
B);
2952 setOriginForNaryOp(
I);
2960 void handleRelationalComparisonExact(ICmpInst &
I) {
2964 Value *Sa = getShadow(
A);
2965 Value *Sb = getShadow(
B);
2976 bool IsSigned =
I.isSigned();
2978 auto GetMinMaxUnsigned = [&](
Value *
V,
Value *S) {
2988 V = IRB.
CreateXor(V, ConstantInt::get(
V->getType(), MinVal));
2993 return std::make_pair(Min, Max);
2996 auto [Amin, Amax] = GetMinMaxUnsigned(
A, Sa);
2997 auto [Bmin, Bmax] = GetMinMaxUnsigned(
B, Sb);
3003 setOriginForNaryOp(
I);
3010 void handleSignedRelationalComparison(ICmpInst &
I) {
3015 op =
I.getOperand(0);
3016 pre =
I.getPredicate();
3018 op =
I.getOperand(1);
3019 pre =
I.getSwappedPredicate();
3032 setShadow(&
I, Shadow);
3033 setOrigin(&
I, getOrigin(
op));
3039 void visitICmpInst(ICmpInst &
I) {
3044 if (
I.isEquality()) {
3045 handleEqualityComparison(
I);
3051 handleRelationalComparisonExact(
I);
3055 handleSignedRelationalComparison(
I);
3061 handleRelationalComparisonExact(
I);
3068 void visitFCmpInst(FCmpInst &
I) { handleShadowOr(
I); }
3070 void handleShift(BinaryOperator &
I) {
3075 Value *S2 = getShadow(&
I, 1);
3078 Value *V2 =
I.getOperand(1);
3080 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3081 setOriginForNaryOp(
I);
3084 void visitShl(BinaryOperator &
I) { handleShift(
I); }
3085 void visitAShr(BinaryOperator &
I) { handleShift(
I); }
3086 void visitLShr(BinaryOperator &
I) { handleShift(
I); }
3088 void handleFunnelShift(IntrinsicInst &
I) {
3092 Value *S0 = getShadow(&
I, 0);
3094 Value *S2 = getShadow(&
I, 2);
3097 Value *V2 =
I.getOperand(2);
3100 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3101 setOriginForNaryOp(
I);
3114 void visitMemMoveInst(MemMoveInst &
I) {
3115 getShadow(
I.getArgOperand(1));
3118 {I.getArgOperand(0), I.getArgOperand(1),
3119 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3137 void visitMemCpyInst(MemCpyInst &
I) {
3138 getShadow(
I.getArgOperand(1));
3141 {I.getArgOperand(0), I.getArgOperand(1),
3142 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3147 void visitMemSetInst(MemSetInst &
I) {
3151 {I.getArgOperand(0),
3152 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
3153 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3157 void visitVAStartInst(VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
3159 void visitVACopyInst(VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
3165 bool handleVectorStoreIntrinsic(IntrinsicInst &
I) {
3169 Value *Addr =
I.getArgOperand(0);
3170 Value *Shadow = getShadow(&
I, 1);
3171 Value *ShadowPtr, *OriginPtr;
3175 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3180 insertCheckShadowOf(Addr, &
I);
3183 if (MS.TrackOrigins)
3192 bool handleVectorLoadIntrinsic(IntrinsicInst &
I) {
3196 Value *Addr =
I.getArgOperand(0);
3198 Type *ShadowTy = getShadowTy(&
I);
3199 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
3200 if (PropagateShadow) {
3204 std::tie(ShadowPtr, OriginPtr) =
3205 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
3209 setShadow(&
I, getCleanShadow(&
I));
3213 insertCheckShadowOf(Addr, &
I);
3215 if (MS.TrackOrigins) {
3216 if (PropagateShadow)
3217 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
3219 setOrigin(&
I, getCleanOrigin());
3239 [[maybe_unused]]
bool
3240 maybeHandleSimpleNomemIntrinsic(IntrinsicInst &
I,
3241 unsigned int trailingFlags) {
3242 Type *RetTy =
I.getType();
3246 unsigned NumArgOperands =
I.arg_size();
3247 assert(NumArgOperands >= trailingFlags);
3248 for (
unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
3249 Type *Ty =
I.getArgOperand(i)->getType();
3255 ShadowAndOriginCombiner SC(
this, IRB);
3256 for (
unsigned i = 0; i < NumArgOperands; ++i)
3257 SC.Add(
I.getArgOperand(i));
3274 bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &
I) {
3275 unsigned NumArgOperands =
I.arg_size();
3276 if (NumArgOperands == 0)
3279 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3280 I.getArgOperand(1)->getType()->isVectorTy() &&
3281 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
3283 return handleVectorStoreIntrinsic(
I);
3286 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3287 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3289 return handleVectorLoadIntrinsic(
I);
3292 if (
I.doesNotAccessMemory())
3293 if (maybeHandleSimpleNomemIntrinsic(
I, 0))
3301 bool maybeHandleUnknownIntrinsic(IntrinsicInst &
I) {
3302 if (maybeHandleUnknownIntrinsicUnlogged(
I)) {
3306 LLVM_DEBUG(
dbgs() <<
"UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " <<
I
3313 void handleInvariantGroup(IntrinsicInst &
I) {
3314 setShadow(&
I, getShadow(&
I, 0));
3315 setOrigin(&
I, getOrigin(&
I, 0));
3318 void handleLifetimeStart(IntrinsicInst &
I) {
3323 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3326 void handleBswap(IntrinsicInst &
I) {
3329 Type *OpType =
Op->getType();
3332 setOrigin(&
I, getOrigin(
Op));
3353 void handleCountLeadingTrailingZeros(IntrinsicInst &
I) {
3355 Value *Src =
I.getArgOperand(0);
3356 Value *SrcShadow = getShadow(Src);
3360 I.getType(),
I.getIntrinsicID(), {Src, False});
3362 I.getType(),
I.getIntrinsicID(), {SrcShadow, False});
3365 ConcreteZerosCount, ShadowZerosCount,
"_mscz_cmp_zeros");
3367 Value *NotAllZeroShadow =
3369 Value *OutputShadow =
3370 IRB.
CreateAnd(CompareConcreteZeros, NotAllZeroShadow,
"_mscz_main");
3376 OutputShadow = IRB.
CreateOr(OutputShadow, BoolZeroPoison,
"_mscz_bs");
3379 OutputShadow = IRB.
CreateSExt(OutputShadow, getShadowTy(Src),
"_mscz_os");
3381 setShadow(&
I, OutputShadow);
3382 setOriginForNaryOp(
I);
3392 void handleNEONVectorConvertIntrinsic(IntrinsicInst &
I) {
3396 Value *S0 = getShadow(&
I, 0);
3405 setShadow(&
I, OutShadow);
3406 setOriginForNaryOp(
I);
3415 FixedVectorType *maybeShrinkVectorShadowType(
Value *Src, IntrinsicInst &
I) {
3435 Value *maybeExtendVectorShadowWithZeros(
Value *Shadow, IntrinsicInst &
I) {
3440 Value *FullShadow = getCleanShadow(&
I);
3441 unsigned ShadowNumElems =
3443 unsigned FullShadowNumElems =
3446 assert((ShadowNumElems == FullShadowNumElems) ||
3447 (ShadowNumElems * 2 == FullShadowNumElems));
3449 if (ShadowNumElems == FullShadowNumElems) {
3450 FullShadow = Shadow;
3454 std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
3479 void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &
I,
3480 bool HasRoundingMode) {
3481 if (HasRoundingMode) {
3489 Value *Src =
I.getArgOperand(0);
3490 assert(Src->getType()->isVectorTy());
3494 VectorType *ShadowType = maybeShrinkVectorShadowType(Src,
I);
3497 Value *S0 = getShadow(&
I, 0);
3509 Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow,
I);
3511 setShadow(&
I, FullShadow);
3512 setOriginForNaryOp(
I);
3533 void handleSSEVectorConvertIntrinsic(IntrinsicInst &
I,
int NumUsedElements,
3534 bool HasRoundingMode =
false) {
3536 Value *CopyOp, *ConvertOp;
3538 assert((!HasRoundingMode ||
3540 "Invalid rounding mode");
3542 switch (
I.arg_size() - HasRoundingMode) {
3544 CopyOp =
I.getArgOperand(0);
3545 ConvertOp =
I.getArgOperand(1);
3548 ConvertOp =
I.getArgOperand(0);
3562 Value *ConvertShadow = getShadow(ConvertOp);
3563 Value *AggShadow =
nullptr;
3566 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3567 for (
int i = 1; i < NumUsedElements; ++i) {
3569 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3570 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3573 AggShadow = ConvertShadow;
3576 insertCheckShadow(AggShadow, getOrigin(ConvertOp), &
I);
3583 Value *ResultShadow = getShadow(CopyOp);
3585 for (
int i = 0; i < NumUsedElements; ++i) {
3587 ResultShadow, ConstantInt::getNullValue(EltTy),
3590 setShadow(&
I, ResultShadow);
3591 setOrigin(&
I, getOrigin(CopyOp));
3593 setShadow(&
I, getCleanShadow(&
I));
3594 setOrigin(&
I, getCleanOrigin());
3602 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3605 return CreateShadowCast(IRB, S2,
T,
true);
3613 return CreateShadowCast(IRB, S2,
T,
true);
3630 void handleVectorShiftIntrinsic(IntrinsicInst &
I,
bool Variable) {
3636 Value *S2 = getShadow(&
I, 1);
3638 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3639 Value *V1 =
I.getOperand(0);
3640 Value *V2 =
I.getOperand(1);
3642 {IRB.CreateBitCast(S1, V1->getType()), V2});
3644 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3645 setOriginForNaryOp(
I);
3650 Type *getMMXVectorTy(
unsigned EltSizeInBits,
3651 unsigned X86_MMXSizeInBits = 64) {
3652 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3653 "Illegal MMX vector element size");
3655 X86_MMXSizeInBits / EltSizeInBits);
3662 case Intrinsic::x86_sse2_packsswb_128:
3663 case Intrinsic::x86_sse2_packuswb_128:
3664 return Intrinsic::x86_sse2_packsswb_128;
3666 case Intrinsic::x86_sse2_packssdw_128:
3667 case Intrinsic::x86_sse41_packusdw:
3668 return Intrinsic::x86_sse2_packssdw_128;
3670 case Intrinsic::x86_avx2_packsswb:
3671 case Intrinsic::x86_avx2_packuswb:
3672 return Intrinsic::x86_avx2_packsswb;
3674 case Intrinsic::x86_avx2_packssdw:
3675 case Intrinsic::x86_avx2_packusdw:
3676 return Intrinsic::x86_avx2_packssdw;
3678 case Intrinsic::x86_mmx_packsswb:
3679 case Intrinsic::x86_mmx_packuswb:
3680 return Intrinsic::x86_mmx_packsswb;
3682 case Intrinsic::x86_mmx_packssdw:
3683 return Intrinsic::x86_mmx_packssdw;
3685 case Intrinsic::x86_avx512_packssdw_512:
3686 case Intrinsic::x86_avx512_packusdw_512:
3687 return Intrinsic::x86_avx512_packssdw_512;
3689 case Intrinsic::x86_avx512_packsswb_512:
3690 case Intrinsic::x86_avx512_packuswb_512:
3691 return Intrinsic::x86_avx512_packsswb_512;
3707 void handleVectorPackIntrinsic(IntrinsicInst &
I,
3708 unsigned MMXEltSizeInBits = 0) {
3712 Value *S2 = getShadow(&
I, 1);
3713 assert(
S1->getType()->isVectorTy());
3719 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) :
S1->
getType();
3720 if (MMXEltSizeInBits) {
3728 if (MMXEltSizeInBits) {
3734 {S1_ext, S2_ext},
nullptr,
3735 "_msprop_vector_pack");
3736 if (MMXEltSizeInBits)
3739 setOriginForNaryOp(
I);
3743 Constant *createDppMask(
unsigned Width,
unsigned Mask) {
3756 const unsigned Width =
3763 Value *DstMaskV = createDppMask(Width, DstMask);
3780 void handleDppIntrinsic(IntrinsicInst &
I) {
3783 Value *S0 = getShadow(&
I, 0);
3787 const unsigned Width =
3789 assert(Width == 2 || Width == 4 || Width == 8);
3792 const unsigned SrcMask =
Mask >> 4;
3793 const unsigned DstMask =
Mask & 0xf;
3796 Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3801 SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3808 setOriginForNaryOp(
I);
3812 C = CreateAppToShadowCast(IRB,
C);
3821 void handleBlendvIntrinsic(IntrinsicInst &
I) {
3826 Value *Sc = getShadow(&
I, 2);
3827 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
3832 C = convertBlendvToSelectMask(IRB,
C);
3833 Sc = convertBlendvToSelectMask(IRB, Sc);
3839 handleSelectLikeInst(
I,
C,
T,
F);
3843 void handleVectorSadIntrinsic(IntrinsicInst &
I,
bool IsMMX =
false) {
3844 const unsigned SignificantBitsPerResultElement = 16;
3846 unsigned ZeroBitsPerResultElement =
3850 auto *Shadow0 = getShadow(&
I, 0);
3851 auto *Shadow1 = getShadow(&
I, 1);
3856 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3859 setOriginForNaryOp(
I);
3877 void handleVectorPmaddIntrinsic(IntrinsicInst &
I,
unsigned ReductionFactor,
3878 unsigned EltSizeInBits = 0) {
3881 [[maybe_unused]] FixedVectorType *
ReturnType =
3886 Value *Va =
nullptr;
3887 Value *Vb =
nullptr;
3888 Value *Sa =
nullptr;
3889 Value *Sb =
nullptr;
3891 assert(
I.arg_size() == 2 ||
I.arg_size() == 3);
3892 if (
I.arg_size() == 2) {
3893 Va =
I.getOperand(0);
3894 Vb =
I.getOperand(1);
3896 Sa = getShadow(&
I, 0);
3897 Sb = getShadow(&
I, 1);
3898 }
else if (
I.arg_size() == 3) {
3900 Va =
I.getOperand(1);
3901 Vb =
I.getOperand(2);
3903 Sa = getShadow(&
I, 1);
3904 Sb = getShadow(&
I, 2);
3913 if (
I.arg_size() == 3) {
3914 [[maybe_unused]]
auto *AccumulatorType =
3916 assert(AccumulatorType == ReturnType);
3919 FixedVectorType *ImplicitReturnType =
ReturnType;
3921 if (EltSizeInBits) {
3923 getMMXVectorTy(EltSizeInBits * ReductionFactor,
3935 ReturnType->getNumElements() * ReductionFactor);
3961 Value *
And = IRB.
CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});
3980 ImplicitReturnType);
3985 OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&
I));
3988 if (
I.arg_size() == 3)
3989 OutShadow = IRB.
CreateOr(OutShadow, getShadow(&
I, 0));
3991 setShadow(&
I, OutShadow);
3992 setOriginForNaryOp(
I);
3998 void handleVectorComparePackedIntrinsic(IntrinsicInst &
I) {
4000 Type *ResTy = getShadowTy(&
I);
4001 auto *Shadow0 = getShadow(&
I, 0);
4002 auto *Shadow1 = getShadow(&
I, 1);
4007 setOriginForNaryOp(
I);
4013 void handleVectorCompareScalarIntrinsic(IntrinsicInst &
I) {
4015 auto *Shadow0 = getShadow(&
I, 0);
4016 auto *Shadow1 = getShadow(&
I, 1);
4018 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
4020 setOriginForNaryOp(
I);
4029 void handleVectorReduceIntrinsic(IntrinsicInst &
I,
bool AllowShadowCast) {
4034 if (AllowShadowCast)
4035 S = CreateShadowCast(IRB, S, getShadowTy(&
I));
4039 setOriginForNaryOp(
I);
4049 void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &
I) {
4053 Value *Shadow0 = getShadow(&
I, 0);
4059 setOriginForNaryOp(
I);
4065 void handleVectorReduceOrIntrinsic(IntrinsicInst &
I) {
4069 Value *OperandShadow = getShadow(&
I, 0);
4071 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
4079 setOrigin(&
I, getOrigin(&
I, 0));
4085 void handleVectorReduceAndIntrinsic(IntrinsicInst &
I) {
4089 Value *OperandShadow = getShadow(&
I, 0);
4090 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
4098 setOrigin(&
I, getOrigin(&
I, 0));
4101 void handleStmxcsr(IntrinsicInst &
I) {
4103 Value *Addr =
I.getArgOperand(0);
4106 getShadowOriginPtr(Addr, IRB, Ty,
Align(1),
true).first;
4111 insertCheckShadowOf(Addr, &
I);
4114 void handleLdmxcsr(IntrinsicInst &
I) {
4119 Value *Addr =
I.getArgOperand(0);
4122 Value *ShadowPtr, *OriginPtr;
4123 std::tie(ShadowPtr, OriginPtr) =
4124 getShadowOriginPtr(Addr, IRB, Ty, Alignment,
false);
4127 insertCheckShadowOf(Addr, &
I);
4130 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
4132 insertCheckShadow(Shadow, Origin, &
I);
4135 void handleMaskedExpandLoad(IntrinsicInst &
I) {
4138 MaybeAlign
Align =
I.getParamAlign(0);
4140 Value *PassThru =
I.getArgOperand(2);
4143 insertCheckShadowOf(
Ptr, &
I);
4144 insertCheckShadowOf(Mask, &
I);
4147 if (!PropagateShadow) {
4148 setShadow(&
I, getCleanShadow(&
I));
4149 setOrigin(&
I, getCleanOrigin());
4153 Type *ShadowTy = getShadowTy(&
I);
4155 auto [ShadowPtr, OriginPtr] =
4156 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy, Align,
false);
4160 getShadow(PassThru),
"_msmaskedexpload");
4162 setShadow(&
I, Shadow);
4165 setOrigin(&
I, getCleanOrigin());
4168 void handleMaskedCompressStore(IntrinsicInst &
I) {
4170 Value *Values =
I.getArgOperand(0);
4172 MaybeAlign
Align =
I.getParamAlign(1);
4176 insertCheckShadowOf(
Ptr, &
I);
4177 insertCheckShadowOf(Mask, &
I);
4180 Value *Shadow = getShadow(Values);
4181 Type *ElementShadowTy =
4183 auto [ShadowPtr, OriginPtrs] =
4184 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy, Align,
true);
4191 void handleMaskedGather(IntrinsicInst &
I) {
4193 Value *Ptrs =
I.getArgOperand(0);
4194 const Align Alignment =
I.getParamAlign(0).valueOrOne();
4196 Value *PassThru =
I.getArgOperand(2);
4198 Type *PtrsShadowTy = getShadowTy(Ptrs);
4200 insertCheckShadowOf(Mask, &
I);
4204 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4207 if (!PropagateShadow) {
4208 setShadow(&
I, getCleanShadow(&
I));
4209 setOrigin(&
I, getCleanOrigin());
4213 Type *ShadowTy = getShadowTy(&
I);
4215 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4216 Ptrs, IRB, ElementShadowTy, Alignment,
false);
4220 getShadow(PassThru),
"_msmaskedgather");
4222 setShadow(&
I, Shadow);
4225 setOrigin(&
I, getCleanOrigin());
4228 void handleMaskedScatter(IntrinsicInst &
I) {
4230 Value *Values =
I.getArgOperand(0);
4231 Value *Ptrs =
I.getArgOperand(1);
4232 const Align Alignment =
I.getParamAlign(1).valueOrOne();
4235 Type *PtrsShadowTy = getShadowTy(Ptrs);
4237 insertCheckShadowOf(Mask, &
I);
4241 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4244 Value *Shadow = getShadow(Values);
4245 Type *ElementShadowTy =
4247 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4248 Ptrs, IRB, ElementShadowTy, Alignment,
true);
4259 void handleMaskedStore(IntrinsicInst &
I) {
4261 Value *
V =
I.getArgOperand(0);
4263 const Align Alignment =
I.getParamAlign(1).valueOrOne();
4265 Value *Shadow = getShadow(V);
4268 insertCheckShadowOf(
Ptr, &
I);
4269 insertCheckShadowOf(Mask, &
I);
4274 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
4275 Ptr, IRB, Shadow->
getType(), Alignment,
true);
4279 if (!MS.TrackOrigins)
4282 auto &
DL =
F.getDataLayout();
4283 paintOrigin(IRB, getOrigin(V), OriginPtr,
4292 void handleMaskedLoad(IntrinsicInst &
I) {
4295 const Align Alignment =
I.getParamAlign(0).valueOrOne();
4297 Value *PassThru =
I.getArgOperand(2);
4300 insertCheckShadowOf(
Ptr, &
I);
4301 insertCheckShadowOf(Mask, &
I);
4304 if (!PropagateShadow) {
4305 setShadow(&
I, getCleanShadow(&
I));
4306 setOrigin(&
I, getCleanOrigin());
4310 Type *ShadowTy = getShadowTy(&
I);
4311 Value *ShadowPtr, *OriginPtr;
4312 std::tie(ShadowPtr, OriginPtr) =
4313 getShadowOriginPtr(
Ptr, IRB, ShadowTy, Alignment,
false);
4315 getShadow(PassThru),
"_msmaskedld"));
4317 if (!MS.TrackOrigins)
4324 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
4329 setOrigin(&
I, Origin);
  void handleAVXMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    Value *Src = I.getArgOperand(2);

    const Align Alignment = Align(1);

    Value *SrcShadow = getShadow(Src);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Dst, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    // The intrinsic may require a floating-point operand, but shadows are
    // arbitrary bit patterns; bitcast to the expected type.
    ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, getCleanShadow(&I));

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }
  void handleAVXMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Value *Mask = I.getArgOperand(1);

    const Align Alignment = Align(1);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Mask, &I);

    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment,
                           /*isStore*/ false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));

    if (!MS.TrackOrigins)
      return;

    // The masked-off lanes carry the (zero) pass-through value, so simply
    // forwarding the origin of the source memory is a fair approximation.
    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize =
        cast<FixedVectorType>(Idx->getType())->getNumElements();
    assert(isPowerOf2_64(IdxVectorSize));

    auto *IdxShadow = getShadow(Idx);
    // Only the low, lane-selecting bits of each index matter; truncate the
    // index shadow to those bits so that poison in the ignored high bits
    // does not trigger a false report.
    Value *Truncated = IRB.CreateTrunc(
        IdxShadow,
        FixedVectorType::get(Type::getIntNTy(*MS.C, Log2_64(IdxVectorSize)),
                             IdxVectorSize));
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }
  void handleAVXVpermilvar(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {Shadow, I.getArgOperand(1)});
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
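  // For example (illustrative, not from the source):
  //   %r = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a,
  //                                                     <4 x i32> %c)
  // is modeled by permuting the shadow of %a with the same control %c,
  // after checking the lane-selecting bits of the shadow of %c.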
  void handleAVXVpermi2var(IntrinsicInst &I) {
    assert(I.arg_size() == 3);
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    IRBuilder<> IRB(&I);
    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {AShadow, Idx, BShadow});
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }
  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }

    assert(isFixedFPVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    assert(Mask->getType()->isIntegerTy());
    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);

    assert(WriteThruNumElements == MaskNumElements);

    insertCheckShadowOf(Mask, &I);

    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // The output (and the mask) has twice as many elements as A; the upper
      // half of the output is zeroed regardless of the mask, and the upper
      // half of the extended A shadow is already clean. Force the
      // corresponding mask bits on so that the select below picks the clean
      // shadow (not the write-through shadow) for those elements.
      Mask = IRB.CreateOr(
          Mask, ConstantInt::get(Mask->getType(),
                                 APInt::getHighBitsSet(MaskNumElements,
                                                       ANumElements)));
    }
    Value *MaskVec = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), MaskNumElements),
        "_ms_mask_bitcast");

    // Convert A's shadow to the output shadow type: any poisoned element
    // poisons the whole corresponding output element.
    AShadow = IRB.CreateSExt(
        IRB.CreateICmpNE(AShadow, getCleanShadow(AShadow)),
        getShadowTy(&I), "_ms_a_shadow");

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(MaskVec, AShadow, WriteThroughShadow,
                                     "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleBmiIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);

    // If any bit of the mask operand is poisoned, the whole result is.
    Value *SMask = getShadow(&I, 1);
    SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
                           ShadowTy);
    // Apply the same intrinsic to the shadow of the first operand.
    Value *S = IRB.CreateCall(I.getCalledFunction(),
                              {getShadow(&I, 0), I.getOperand(1)});
    S = IRB.CreateOr(SMask, S);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      Mask.append(2, X);
    }
    return Mask;
  }
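  // For example, getPclmulMask(4, /*OddElements=*/false) yields {0, 0, 2, 2}
  // and getPclmulMask(4, /*OddElements=*/true) yields {1, 1, 3, 3}: each
  // 128-bit carry-less product depends on one even (or odd) element per
  // operand, so that element's shadow is splatted over the pair.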
  void handlePclmulIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    SOC.Done(&I);
  }
  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    // First element of the second operand, remaining elements of the first
    // operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // The result of a vtest/ptest is poisoned if any bit of either input that
  // participates in the comparison is poisoned.
  void handleVtestIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *Or = IRB.CreateOr(Shadow0, Shadow1);
    Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
    Value *Scalar = convertShadowToScalar(NZ, IRB);
    Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    Value *OrShadow = IRB.CreateOr(First, Second);
    // First element of both operands OR'd together, remaining elements of
    // the first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // The rounding-mode operand of avx.round.* and sse41.round.* is a
  // compile-time constant and needs no checking.
  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    assert(isa<ConstantInt>(I.getArgOperand(1)));

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    SC.Done(&I);
  }
  void handleAbsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());

    // If is_int_min_poison is set, abs(INT_MIN) is poison, so poison the
    // shadow of any lane that holds INT_MIN.
    Value *SrcShadow = getShadow(Src);

    APInt MinVal =
        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
    Value *IsMinVal = IRB.CreateICmpEQ(Src, MinValVec);

    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow =
        IRB.CreateSelect(IsMinVal, PoisonedShadow, SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleIsFpClass(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    // The (boolean) result is poisoned iff any bit of the operand is.
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));

    Value *Shadow = PoisonValue::get(getShadowTy(&I));
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
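  // Illustrative IR (not from the source): for
  //   %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  // the first shadow element is shadow(%a) | shadow(%b), and the i1
  // overflow flag is poisoned iff that combined shadow is non-zero.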
  // Extract the shadow of the lowest (scalar) element of a vector value.
  Value *extractLowerShadow(IRBuilder<> &IRB, Value *V) {
    Value *Shadow = getShadow(V);
    return IRB.CreateExtractElement(Shadow,
                                    ConstantInt::get(IRB.getInt32Ty(), 0));
  }
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->isIntegerTy());
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);

    // The mask has one bit per input element; if the output vector is wider,
    // the extra elements are zeroed regardless of the mask, so widen the
    // mask with zeros.
    if (ANumElements != OutputNumElements) {
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
                            "_ms_widen_mask");
    }

    Value *AShadow = getShadow(A);
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);

    // The return type may have narrower elements than the input; truncate
    // the shadow the same way the value is truncated.
    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(
        IRB.CreateBitCast(
            Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements)),
        AShadow, WriteThroughShadow, "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
                                         unsigned WriteThruIndex,
                                         unsigned MaskIndex) {
    IRBuilder<> IRB(&I);

    unsigned NumArgs = I.arg_size();
    assert(AIndex < NumArgs);
    assert(WriteThruIndex < NumArgs);
    assert(MaskIndex < NumArgs);
    assert(AIndex != WriteThruIndex);
    assert(AIndex != MaskIndex);
    assert(WriteThruIndex != MaskIndex);

    Value *A = I.getOperand(AIndex);
    Value *WriteThru = I.getOperand(WriteThruIndex);
    Value *Mask = I.getOperand(MaskIndex);

    assert(isFixedFPVector(A));
    assert(isFixedFPVector(WriteThru));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThru->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    // All remaining operands (mask, rounding control, etc.) are integers and
    // must be fully initialized.
    for (unsigned i = 0; i < NumArgs; ++i) {
      if (i != AIndex && i != WriteThruIndex) {
        assert(I.getOperand(i)->getType()->isIntegerTy());
        insertCheckShadowOf(I.getOperand(i), &I);
      }
    }

    // The mask is at least 8 bits wide even for narrower vectors; the extra
    // bits are irrelevant.
    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);

    Value *AShadow = getShadow(A);
    Value *MaskVec = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));
    Value *WriteThruShadow = getShadow(WriteThru);
    Value *Shadow = IRB.CreateSelect(MaskVec, AShadow, WriteThruShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);

    insertCheckShadowOf(Mask, &I);

    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    assert(A->getType() == WriteThrough->getType());
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);

    // Only the lowest element is computed; the remaining elements pass
    // through from the first operand.
    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);

    // Bit zero of the mask selects between the computed value and the
    // write-through value for the lowest element.
    Value *MaskLower = IRB.CreateTrunc(Mask, IRB.getInt1Ty());
    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
        "_msprop");

    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }
  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *X = I.getOperand(1);
    Value *B = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(cast<VectorType>(A->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);

    assert(A->getType() == X->getType());

    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);

    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    // An affine transform over GF(2^8) mixes every bit of each input byte
    // into the output byte. Apply the intrinsic itself to the shadows (with
    // a clean B) in each poison combination and OR the results.
    CallInst *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    CallInst *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {X, AShadow, BZeroShadow});
    CallInst *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {XShadow, A, BZeroShadow});

    // B's shadow is XOR'd into every byte of the result; broadcast it.
    unsigned NumElements =
        cast<FixedVectorType>(I.getType())->getNumElements();
    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    for (unsigned i = 0; i < NumElements; i++)
      BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }
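  // Sketch of why three applications are needed: for out = affine(x, A, b),
  // an output bit can be tainted by shadow(x) against the real A, by the
  // real x against shadow(A), or by both shadows together. OR-ing the three
  // intrinsic applications (each with a clean b) over-approximates all of
  // these, and b's shadow is then broadcast into every output byte.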
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();

    // The return type is a struct of vectors.
    assert(I.getType()->isStructTy());

    IRBuilder<> IRB(&I);
    SmallVector<Value *, 6> ShadowArgs;

    if (WithLane) {
      // 2, 3 or 4 input vectors, plus the lane number and the source pointer.
      assert(4 <= numArgs && numArgs <= 6);

      // Pass the shadows of the input vectors to the shadow intrinsic.
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      ShadowArgs.push_back(LaneNumber);

      // The lane number must be fully initialized.
      insertCheckShadowOf(LaneNumber, &I);
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1),
                           /*isStore*/ false);
    ShadowArgs.push_back(SrcShadowPtr);

    // Apply the same intrinsic to the shadow, loading from the shadow of
    // the source.
    CallInst *CI =
        IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, CI);

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    IRBuilder<> IRB(&I);

    // Don't use arg_size() + 1; getNumOperands() would also count the callee.
    int numArgOperands = I.arg_size();

    // The destination pointer is the last argument.
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    assert(Addr->getType()->isPointerTy());
    int skipTrailingOperands = 1;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // The second-to-last argument is the lane number, if applicable.
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      assert(isa<IntegerType>(
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
    }

    SmallVector<Value *, 8> ShadowArgs;
    // All the initial operands are the vectors to be stored.
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }

    // The total width of the stored shadow matches the total width of the
    // stored vectors.
    FixedVectorType *FVT =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    FixedVectorType *OutputVectorTy = FixedVectorType::get(
        FVT->getElementType(),
        FVT->getNumElements() * (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(
          1, I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // AArch64 NEON does not require alignment (unless the OS does).
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
    ShadowArgs.append(1, OutputShadowPtr);

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, getCleanShadow(&I));

    if (MS.TrackOrigins) {
      // TODO: if we modelled the vst* instruction more precisely, we could
      // more accurately track the origins (e.g. if both inputs are
      // uninitialized for vst2, we currently blame the second input, even
      // though part of the output depends only on the first input).
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }
  // Handle an intrinsic by applying it (or a substitute intrinsic) to the
  // shadows of its leading arguments; the trailing arguments are passed
  // verbatim, and their shadows are OR'd into the result shadow.
  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID shadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    IRBuilder<> IRB(&I);

    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);
      // Shadows are integer-ish types but some intrinsics require a
      // different (e.g., floating-point) type.
      ShadowArgs.push_back(
          IRB.CreateBitCast(Shadow, I.getArgOperand(i)->getType()));
    }

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Arg = I.getArgOperand(i);
      ShadowArgs.push_back(Arg);
    }

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // Combine the computed shadow with the shadows of the trailing args.
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }

    setShadow(&I, IRB.CreateBitCast(CombinedShadow, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
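  // Worked example (illustrative): for pshufb,
  //   %r = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a,
  //                                                   <16 x i8> %c)
  // with trailingVerbatimArgs == 1 this roughly computes
  //   shadow(%r) = pshufb(shadow(%a), %c) | cast(shadow(%c))
  // i.e., the data shadow is shuffled by the real control, and any poison
  // in the control taints the result.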
  // Multiply-like NEON intrinsics: OR-ing the operand shadows is a
  // reasonable over-approximation.
  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    handleShadowOr(I);
  }
  // Handle target-independent intrinsics. Returns true if the intrinsic was
  // handled.
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/0);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, false);
      break;
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      handleShadowOr(I);
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }
  // Handle x86 SIMD intrinsics. Returns true if the intrinsic was handled.
  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;
    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;
    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }
    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, false);
      break;
    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }
    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, true);
      break;
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /*Variable*/ false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /*Variable*/ true);
      break;
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;
    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;
    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;
    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;
    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorPmaddIntrinsic(I, 2);
      break;
    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 2, 8);
      break;
    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 2, 16);
      break;
    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssds_512:
    case Intrinsic::x86_avx2_vpdpbsud_128:
    case Intrinsic::x86_avx2_vpdpbsud_256:
    case Intrinsic::x86_avx10_vpdpbsud_512:
    case Intrinsic::x86_avx2_vpdpbsuds_128:
    case Intrinsic::x86_avx2_vpdpbsuds_256:
    case Intrinsic::x86_avx10_vpdpbsuds_512:
    case Intrinsic::x86_avx2_vpdpbuud_128:
    case Intrinsic::x86_avx2_vpdpbuud_256:
    case Intrinsic::x86_avx10_vpdpbuud_512:
    case Intrinsic::x86_avx2_vpdpbuuds_128:
    case Intrinsic::x86_avx2_vpdpbuuds_256:
    case Intrinsic::x86_avx10_vpdpbuuds_512:
      handleVectorPmaddIntrinsic(I, 4, 8);
      break;
    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
      handleVectorPmaddIntrinsic(I, 2, 16);
      break;
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;
    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;
    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;
    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;
    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;
    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
    case Intrinsic::x86_avx2_phsub_w: {
      handlePairwiseShadowOrIntrinsic(I, 16);
      break;
    }
    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
    case Intrinsic::x86_avx2_phsub_d: {
      handlePairwiseShadowOrIntrinsic(I, 32);
      break;
    }
    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
    case Intrinsic::x86_avx2_phsub_sw: {
      handlePairwiseShadowOrIntrinsic(I, 16);
      break;
    }
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256: {
      handlePairwiseShadowOrIntrinsic(I);
      break;
    }
    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }
    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }
    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // These AVX512 variants carry a trailing rounding-mode operand.
      [[maybe_unused]] bool Success =
          maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }
    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      // Masked truncating packed move: the write-through shadow flows
      // through the intrinsic, and the mask is passed verbatim.
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      // Saturation is irrelevant to the shadow; use the truncating variant.
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_dw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_db_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qb_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      // These have no truncating counterpart to apply to the shadow.
      handleAVX512VectorDownConvert(I);
      break;
    }
    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1,
                                        /*MaskIndex=*/2);
      break;
    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1,
                                        /*MaskIndex=*/2);
      break;
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
    case Intrinsic::x86_avx512_mask_rndscale_ps_512:
    case Intrinsic::x86_avx512_mask_rndscale_ps_256:
    case Intrinsic::x86_avx512_mask_rndscale_ps_128:
    case Intrinsic::x86_avx512_mask_rndscale_pd_512:
    case Intrinsic::x86_avx512_mask_rndscale_pd_256:
    case Intrinsic::x86_avx512_mask_rndscale_pd_128:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 2,
                                        /*MaskIndex=*/3);
      break;
    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }
    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;
    default:
      return false;
    }
    return true;
  }
  // Handle AArch64 NEON intrinsics. Returns true if the intrinsic was
  // handled.
  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, /*Variable*/ false);
      break;
    // Floating-point min/max (pairwise)
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    // Integer min/max (pairwise)
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    // Add (pairwise)
    case Intrinsic::aarch64_neon_addp:
    case Intrinsic::aarch64_neon_faddp:
    // Add long (pairwise)
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I);
      break;
    }
    // Floating-point to integer conversions
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtxn: {
      handleNEONVectorConvertIntrinsic(I);
      break;
    }
    // Add reduction to scalar
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    // Signed/unsigned min/max reduction to scalar
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    // Floating-point min/max reduction to scalar
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    // Add long reduction to scalar
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, true);
      break;
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, /*WithLane=*/false);
      break;
    }
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, /*WithLane=*/true);
      break;
    }
    // Saturating extract narrow
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      handleVectorPackIntrinsic(I);
      break;
    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/false);
      break;
    }
    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/true);
      break;
    }
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      // The last trailing argument (the lookup table) is passed verbatim.
      handleIntrinsicByApplyingToShadow(
          I, I.getIntrinsicID(), /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }
    default:
      return false;
    }
    return true;
  }
  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have CB terminate the BB.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure
    // the shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }
  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure
    // the shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    // Atomic store always paints clean shadow/origin.
    IRB.CreateMemSet(DstShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     Size, Align(1));
  }
  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm (either a call to an asm function, or a callbr
      // instruction), do the usual thing only if ClHandleAsmConservative is
      // set.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need special handling: there isn't a good way
      // to intercept them or compile the library with instrumentation.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us.
      // To prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);

      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction()) {
        Func->removeFnAttrs(B);
      }
    }
    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so don't check them eagerly.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();

      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        [[maybe_unused]] Value *Store = nullptr;
        // Compute the shadow for the arg even if it is ByVal, because if it
        // is ByVal nothing else will be done for it.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a
          // single load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment,
                               alignTo(Size, kMinOriginAlignment));
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of
          // uninit data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
      }
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now get the shadow for the return value.
    if (!CB.getType()->isSized())
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
        kShadowTLSAlignment, "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: consider a SpecialCaseList to specify functions that must
    // always return fully initialized values. For now, "main" is hardcoded.
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass
    // our check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }
  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false,
                              GlobalValue::PrivateLinkage, IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    return createPrivateConstGlobalForString(*F.getParent(), I.getName());
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn,
                       {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack) {
      Value *Descr = getLocalVarDescription(I);
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the
    // corresponding llvm.lifetime.start.
    AllocaSet.insert(&I);
  }
  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if the condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (the condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned.
      // If !Sb (the condition is unpoisoned), simply pick one of Sc and Sd.

      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);

      // Result shadow if the condition shadow is 1.
      Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }
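  // A worked instance of the scalar formula above (illustrative): for
  //   %r = select i1 %b, i32 %c, i32 %d
  // with Sb/Sc/Sd the operand shadows,
  //   Sa = Sb ? ((%c ^ %d) | Sc | Sd) : (%b ? Sc : Sd)
  // so a poisoned condition only poisons the bits where the two arms could
  // actually differ (or are themselves poisoned).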
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See https://github.com/google/sanitizers/issues/504
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  void visitResumeInst(ResumeInst &I) {
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    // Nothing to do here.
  }

  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // For each assembly argument, check its value for being initialized.
    // If the argument is a pointer, assume it points to a single element of
    // the corresponding type, and instrument the pointed-to memory.
    Type *OpType = Operand->getType();
    // Check the operand value itself.
    insertCheckShadowOf(Operand, &I);
    if (!OpType->isPointerTy() || !isOutput) {
      assert(!isOutput);
      return;
    }
    if (!ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy, derived from elementtype(), does not encode the alignment of
      // the pointer. Conservatively assume the shadow memory is unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }

  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments. The first getNumOutputArgs() operands
    // of the CallInst are outputs returned by pointer; the remaining
    // operands (except the callee) are inputs.
    const DataLayout &DL = F.getDataLayout();
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Do so before unpoisoning output arguments, so
    // that we don't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, /*ElemTy=*/nullptr, I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual
    // InlineAsm call, so that the shadow for memory published in the asm()
    // statement remains valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      Type *ElemTy = CB->getParamElementType(i);
      instrumentAsmArgument(Operand, ElemTy, I, IRB, DL, /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: check each operand and mark the result as fully
    // initialized.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
struct VarArgHelperBase : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    return IRB.CreatePtrAdd(
        MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
  }

  /// Compute the shadow address for a given va_arg, checking that it fits
  /// in the TLS area.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    // Make sure we don't overflow kParamTLSSize.
    assert(ArgOffset + ArgSize <= kParamTLSSize);
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    // getOriginPtrForVAArgument() is always called after
    // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
    // overflow.
    return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset),
                            "_msarg_va_o");
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // The tail of __msan_va_arg_tls is not large enough to fit the full
    // value shadow, but it will be copied to the backup anyway. Make it
    // clean.
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
/// AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelperBase {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallBase for more details.
  static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of the X86_64 argument classification
    // rules.
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // For VarArg functions, store the argument shadow in an ABI-specific
  // format that corresponds to va_list layout.
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        // Fixed arguments passed through the overflow area will be stepped
        // over by va_start, so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy the shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
            A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory: {
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy the shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
          break;
        }
        }
        // Register offsets are counted for fixed arguments too, but their
        // shadow is never stored.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr,
                         Alignment, VAArgOverflowSize);
      }
    }
  }
};
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of the AArch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    LLVM_DEBUG(errs() << "Unknown vararg type: " << *T << "\n");
    return {AK_Memory, 0};
  }
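  // For example (illustrative), classifyArgument([4 x float]) recurses into
  // the element type, getting {AK_FloatingPoint, 1}, and multiplies the
  // register count by the array length, yielding {AK_FloatingPoint, 4}: such
  // an aggregate consumes four FP/SIMD registers when passed variadically.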
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      // Demote to memory once the corresponding register file is exhausted.
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory: {
        // Don't count fixed arguments in the overflow area: va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // No TLS space left to copy the shadow to.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
        break;
      }
      }
      // Fixed register arguments only advance the offsets; no shadow store.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size; the offset fields are signed.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
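  // The offsets fed to getVAField64/getVAField32 below follow the AAPCS64
  // va_list layout; a sketch for reference (names per the ABI document):
  //
  //   typedef struct {
  //     void *__stack;   // byte 0:  next stack (overflow) argument
  //     void *__gr_top;  // byte 8:  end of the GP register save area
  //     void *__vr_top;  // byte 16: end of the FP/SIMD register save area
  //     int __gr_offs;   // byte 24: negative offset from __gr_top
  //     int __vr_offs;   // byte 28: negative offset from __vr_top
  //   } va_list;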
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
          VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);

      Value *StackSaveAreaPtr = IRB.CreateIntToPtr(
          getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // __gr_offs is negative for a variadic caller, so adding it to the GR
      // argument size skips over the shadow of the named arguments.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // The same again for the FP/SIMD register save area.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally the overflow (stack) argument area.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 8) {}
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // The parameter save area starts 48 bytes past the frame pointer under
    // ELFv1 and 32 bytes under ELFv2.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment,
                                       /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the element size, except for long double
          // arrays, which stay 8-byte aligned.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // On big-endian targets an argument narrower than 8 bytes occupies
          // the high end of its slot, so shift the shadow to match.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      // On PPC64 the va_list is a plain pointer, so the tag itself holds the
      // register save area pointer.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 12) {}
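  // A sketch of the 32-bit PowerPC SysV va_list tag this helper assumes
  // (names per the ABI document); it explains the 12-byte tag size above and
  // the +4/+8 offsets used in finalizeInstrumentation() below:
  //
  //   typedef struct {
  //     unsigned char gpr;        // byte 0: GPRs consumed so far
  //     unsigned char fpr;        // byte 1: FPRs consumed so far
  //     unsigned short reserved;  // byte 2
  //     void *overflow_arg_area;  // byte 4: stack arguments
  //     void *reg_save_area;      // byte 8: saved r3-r10 (8 x 4 = 32 bytes)
  //   } va_list[1];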
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase = 0;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment,
                                       /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          Type *ElementTy = ArgTy->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Narrow arguments occupy the high end of their slot on
          // big-endian targets; shift the shadow to match.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS: the total size of all
    // variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;
      // reg_save_area lives 8 bytes into the va_list tag.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      // At most 8 GPRs (32 bytes) are saved in the register save area.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);

      // Whatever did not fit into the register save area spills to the
      // overflow area, whose pointer sits 4 bytes into the va_list tag.
      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(
            F.getFnAttribute("use-soft-float").getValueAsBool()) {}
  ArgKind classifyArgument(Type *T) {
    // i128 and fp128 are passed indirectly.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }
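  // For example: an i128 or fp128 argument is passed indirectly, via a
  // pointer held in a GPR; a double is FloatingPoint unless the function is
  // built with use-soft-float, in which case it travels in a GPR like an
  // integer and its shadow is tracked accordingly.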
  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // Integer arguments narrower than 64 bits are widened by the caller, so
    // the shadow must be widened the same way.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt))
      return ShadowExtension::Zero;
    if (CB.paramHasAttr(ArgNo, Attribute::SExt))
      return ShadowExtension::Sign;
    return ShadowExtension::None;
  }
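  // Example: on SystemZ a variadic `int` argument carries the signext
  // attribute, so its 32-bit shadow is sign-extended to fill the 64-bit
  // argument slot; without either attribute the shadow keeps its natural
  // width and visitCallBase() below skips the leading gap bytes instead.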
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        // Indirect arguments are passed as a pointer in a GPR.
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;

      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        uint64_t ArgSize = 8;
        if (!IsFixed) {
          SE = getShadowExtension(CB, ArgNo);
          uint64_t GapSize = 0;
          if (SE == ShadowExtension::None) {
            // An unextended argument occupies the low end of its 8-byte slot.
            uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
            assert(ArgAllocSize <= ArgSize);
            GapSize = ArgSize - ArgAllocSize;
          }
          ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
        }
        GpOffset += ArgSize;
        break;
      }
      case ArgKind::FloatingPoint: {
        uint64_t ArgSize = 8;
        if (!IsFixed) {
          ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
        }
        FpOffset += ArgSize;
        break;
      }
      case ArgKind::Vector: {
        // Vector varargs go through the overflow area; only count the index.
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          SE = getShadowExtension(CB, ArgNo);
          uint64_t GapSize =
              SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
          ShadowBase =
              getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
          if (MS.TrackOrigins)
            OriginBase =
                getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
          OverflowOffset += ArgSize;
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed=*/SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    // For use-soft-float functions it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr =
        IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      if (MS.TrackOrigins)
        VAArgTLSOriginCopy =
            IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 4) {}
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment,
                                       /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Narrow arguments occupy the high end of their slot on
          // big-endian targets; shift the shadow to match.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS: the total size of all
    // variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Narrow arguments occupy the high end of their slot on big-endian
        // targets; shift the shadow to match.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS: the total size of all
    // variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
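  // Worked example for visitCallBase above (illustrative only): for a call
  // with a trailing i32 and double vararg on a 64-bit little-endian target,
  // the i32 shadow lands at bytes [0, 4) and the offset is rounded up to 8;
  // the double shadow lands at [8, 16), so TotalVAArgSize is 16.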
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
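// Only the va_list tag size differs between these targets: CreateVarArgHelper
// below passes 4 bytes for the 32-bit variants and 8 bytes for the 64-bit
// ones.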
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  return Visitor.runOnFunction();
}