#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"

              "Controls which checks to insert");

              "Controls which instruction to instrument");

    "msan-track-origins",

    cl::desc("keep going after reporting a UMR"),

    "msan-poison-stack-with-call",

    "msan-poison-stack-pattern",
    cl::desc("poison uninitialized stack variables with the given pattern"),

    cl::desc("Print name of local stack variable"),

    cl::desc("Poison fully undef temporary values. "
             "Partially undefined constant vectors "
             "are unaffected by this flag (see "
             "-msan-poison-undef-vectors)."),

    "msan-poison-undef-vectors",
    cl::desc("Precisely poison partially undefined constant vectors. "
             "If false (legacy behavior), the entire vector is "
             "considered fully initialized, which may lead to false "
             "negatives. Fully undefined constant vectors are "
             "unaffected by this flag (see -msan-poison-undef)."),

    "msan-precise-disjoint-or",
    cl::desc("Precisely poison disjoint OR. If false (legacy behavior), "
             "disjointedness is ignored (i.e., 1|1 is initialized)."),

    cl::desc("propagate shadow through ICmpEQ and ICmpNE"),

    cl::desc("exact handling of relational integer ICmp"),

    "msan-handle-lifetime-intrinsics",
    cl::desc(
        "when possible, poison scoped variables at the beginning of the scope "
        "(slower, but more precise)"),

    "msan-handle-asm-conservative",

    "msan-check-access-address",
    cl::desc("report accesses through a pointer which has poisoned shadow"),

    cl::desc("check arguments and return values at function call boundaries"),
    "msan-dump-strict-instructions",
    cl::desc("print out instructions with default strict semantics, i.e., "
             "check that all the inputs are fully initialized, and mark "
             "the output as fully initialized. These semantics are applied "
             "to instructions that could not be handled explicitly nor "
             "heuristically."),
    "msan-dump-heuristic-instructions",
    cl::desc("Prints 'unknown' instructions that were handled heuristically. "
             "Use -msan-dump-strict-instructions to print instructions that "
             "could not be handled explicitly nor heuristically."),

    "msan-instrumentation-with-call-threshold",
    cl::desc("If the function being instrumented requires more than "
             "this number of checks and origin stores, use callbacks instead "
             "of inline checks (-1 means never use callbacks)."),

    cl::desc("Enable KernelMemorySanitizer instrumentation"),

    cl::desc("Insert checks for constant shadow values"),

    cl::desc("Place MSan constructors in comdat sections"),

    cl::desc("Define custom MSan AndMask"),

    cl::desc("Define custom MSan XorMask"),

    cl::desc("Define custom MSan ShadowBase"),

    cl::desc("Define custom MSan OriginBase"),

    cl::desc("Define threshold for number of checks per "
             "debug location to force origin update."),
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
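// How the four parameters combine is visible in getShadowPtrOffset() and
// getShadowOriginPtrUserspace() below: roughly,
//   Shadow = ((Addr & ~AndMask) ^ XorMask) + ShadowBase
//   Origin = ((Addr & ~AndMask) ^ XorMask) + OriginBase   (aligned down to 4)
// As a concrete illustration (not part of this excerpt), upstream's Linux
// x86-64 parameters are AndMask = 0, XorMask = 0x500000000000, ShadowBase = 0
// and OriginBase = 0x100000000000, i.e. shadow lives at a fixed XOR offset
// from the application address.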
class MemorySanitizer {

  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;
  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args);
  Value *ParamOriginTLS;

  Value *RetvalOriginTLS;

  Value *VAArgOriginTLS;

  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;

  FunctionCallee MaybeWarningVarSizeFn;

  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;

  FunctionCallee MsanPoisonStackFn;

  FunctionCallee MsanChainOriginFn;

  FunctionCallee MsanSetOriginFn;

  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;

  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;

  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;

  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;

  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;

  MemoryMapParams CustomMapParams;

  MDNode *ColdCallWeights;

  MDNode *OriginStoreWeights;
void insertModuleCtor(Module &M) {

  if (!Options.Kernel) {

  MemorySanitizer Msan(*F.getParent(), Options);

      OS, MapClassName2PassName);

  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;
template <typename... ArgsTy>
FunctionCallee
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args) {

                                 std::forward<ArgsTy>(Args)...);

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
  RetvalOriginTLS = nullptr;

  ParamOriginTLS = nullptr;

  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
  }

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);

  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
  return M.getOrInsertGlobal(Name, Ty, [&] {

                              nullptr, Name, nullptr,
void MemorySanitizer::createUserspaceApi(Module &M,
                                         const TargetLibraryInfo &TLI) {

  StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                    : "__msan_warning_with_origin_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName,
                                    IRB.getVoidTy(), IRB.getInt32Ty());

      Recover ? "__msan_warning" : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

                    IRB.getIntPtrTy(M.getDataLayout()));

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName,
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName,
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
        IRB.getInt32Ty());
  }

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
void MemorySanitizer::initializeCallbacks(Module &M,
                                          const TargetLibraryInfo &TLI) {

  if (CallbacksInitialized)
    return;

  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());

  MsanSetOriginFn = M.getOrInsertFunction(
      "__msan_set_origin",
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());

  MemmoveFn =
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset",
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

  if (CompileKernel) {
    createKernelApi(M, TLI);
  } else {
    createUserspaceApi(M, TLI);
  }
  CallbacksInitialized = true;
}

FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
                                                             int size) {
  FunctionCallee *Fns =
      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;

  if (ShadowPassed || OriginPassed) {

    MapParams = &CustomMapParams;

  switch (TargetTriple.getOS()) {

    switch (TargetTriple.getArch()) {

    switch (TargetTriple.getArch()) {

    switch (TargetTriple.getArch()) {

  C = &(M.getContext());

  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();

  if (!CompileKernel) {

    M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(
          M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
          IRB.getInt32(TrackOrigins), "__msan_track_origins");
    });

    M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                GlobalValue::WeakODRLinkage,
                                IRB.getInt32(Recover), "__msan_keep_going");
    });
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;

  virtual void visitVAStartInst(VAStartInst &I) = 0;

  virtual void visitVACopyInst(VACopyInst &I) = 0;

  virtual void finalizeInstrumentation() = 0;
};

struct MemorySanitizerVisitor;

                                 MemorySanitizerVisitor &Visitor);

  if (TypeSizeFixed <= 8)
class NextNodeIRBuilder : public IRBuilder<> {

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {

  MemorySanitizer &MS;

  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;

  bool PropagateShadow;

  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {

    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  };

  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallSetVector<AllocaInst *, 16> AllocaSet;

  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)

    bool SanitizeFunction =

    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);

    FnPrologueEnd =
        IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
            .CreateIntrinsic(Intrinsic::donothing, {});

    if (MS.CompileKernel) {

      insertKmsanPrologue(IRB);
    }

    LLVM_DEBUG(if (!InsertChecks) dbgs()
               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");
  bool instrumentWithCalls(Value *V) {

    ++SplittableBlocksCount;

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&

    if (MS.TrackOrigins <= 1)
      return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);

    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);

      for (unsigned i = 0; i < Size / IntptrSize; ++i) {

      CurrentAlignment = IntptrAlignment;
    }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();

    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());

    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);

      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,

    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());

    if (instrumentWithCalls(ConvertedShadow) &&

      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =

      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});

      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");

      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
  void materializeStores() {
    for (StoreInst *SI : StoreList) {

      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;

      const Align Alignment = SI->getAlign();

      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      [[maybe_unused]] StoreInst *NewSI =

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
    if (MS.TrackOrigins < 2)
      return;

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

      auto NewDebugLoc = OI->getDebugLoc();

      IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
      Origin = updateOrigin(Origin, IRBOrigin);

    if (MS.CompileKernel || MS.TrackOrigins)

    const DataLayout &DL = F.getDataLayout();
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());

    if (instrumentWithCalls(ConvertedShadow) &&

      FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =

      ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);

                     MS.TrackOrigins && Origin ? Origin
                                               : (Value *)IRB.getInt32(0)});

      FunctionCallee Fn = MS.MaybeWarningVarSizeFn;

      unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());

                     {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
                      MS.TrackOrigins && Origin ? Origin
                                                : (Value *)IRB.getInt32(0)});

      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");

                                    !MS.Recover, MS.ColdCallWeights);

      insertWarningFn(IRB, Origin);
  void materializeInstructionChecks(
      ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
    const DataLayout &DL = F.getDataLayout();

    bool Combine = !MS.TrackOrigins;

    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);

      Value *ConvertedShadow = ShadowData.Shadow;

        insertWarningFn(IRB, ShadowData.Origin);

        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);

        Shadow = ConvertedShadow;

      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
    }

      materializeOneCheck(IRB, Shadow, nullptr);
  static bool isAArch64SVCount(Type *Ty) {

    return TTy->getName() == "aarch64.svcount";
  }

  static bool isScalableNonVectorType(Type *Ty) {
    if (!isAArch64SVCount(Ty))
      LLVM_DEBUG(dbgs() << "isScalableNonVectorType: Unexpected type " << *Ty
  void materializeChecks() {

    SmallPtrSet<Instruction *, 16> Done;

    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;

      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;
                            });
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");

    MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
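    // KMSAN note: unlike userspace MSan, which keeps its TLS slots in global
    // variables, the kernel runtime hands out a per-task context-state struct
    // via __msan_get_context_state(); the GEPs above carve the param/retval/
    // va_arg shadow and origin arrays out of that struct.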
    for (Instruction *I : Instructions)

    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    for (auto Item : LifetimeStartList) {
      instrumentAlloca(*Item.second, Item.first);
      AllocaSet.remove(Item.second);
    }

    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    materializeChecks();

    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

    const DataLayout &DL = F.getDataLayout();

      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getElementCount());

      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());

      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));

      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");

    if (isScalableNonVectorType(OrigTy)) {
      LLVM_DEBUG(dbgs() << "getShadowTy: Scalable non-vector type: " << *OrigTy

    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
  Value *collapseStructShadow(StructType *Struct, Value *Shadow,
                              IRBuilder<> &IRB) {

    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {

      Value *ShadowBool = convertToBool(ShadowItem, IRB);

      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
      else
        Aggregator = ShadowBool;

  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
                             IRBuilder<> &IRB) {
    if (!Array->getNumElements())

    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);

    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {

      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);

      return collapseStructShadow(Struct, V, IRB);

      return collapseArrayShadow(Array, V, IRB);

        V->getType()->getPrimitiveSizeInBits().getFixedValue();

    Type *VTy = V->getType();

    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());
    }

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    }
    assert(IntPtrTy == MS.IntptrTy);

      return ConstantVector::getSplat(
          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    }
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C, false,

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));

    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
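    // At this point OffsetLong == ((uintptr_t)Addr & ~AndMask) ^ XorMask; the
    // callers below add ShadowBase (for shadow) or OriginBase (for origins)
    // on top of it. With AndMask == 0 and ShadowBase == 0 the mapping
    // degenerates to a plain XOR of the address with XorMask.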
  std::pair<Value *, Value *>
  getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
                              MaybeAlign Alignment) {

      assert(VectTy->getElementType()->isPointerTy());

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
      ShadowLong =
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
    }
    Value *ShadowPtr = IRB.CreateIntToPtr(
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
        OriginLong =
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));

        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));

      OriginPtr = IRB.CreateIntToPtr(
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    }
    return std::make_pair(ShadowPtr, OriginPtr);

  template <typename... ArgsTy>
  Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
                            ArgsTy... Args) {

      IRB.CreateCall(Callee,
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);

    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
                                                            IRBuilder<> &IRB,
                                                            Type *ShadowTy,
                                                            bool isStore) {
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);

    FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);

      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);

      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          IRB,
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,

    return std::make_pair(ShadowPtr, OriginPtr);

  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
                                                       IRBuilder<> &IRB,
                                                       Type *ShadowTy,
                                                       bool isStore) {

      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    Value *ShadowPtrs = ConstantInt::getNullValue(

    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(

    for (unsigned i = 0; i < NumElements; ++i) {

      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);

          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)

            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    }
    return {ShadowPtrs, OriginPtrs};
  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 Type *ShadowTy,
                                                 MaybeAlign Alignment,
                                                 bool isStore) {
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
  }

                        ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");

    if (!MS.TrackOrigins)

                        ConstantInt::get(MS.IntptrTy, ArgOffset),

  Value *getOriginPtrForRetval() {

    return MS.RetvalOriginTLS;
  }

  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins)
      return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }
    Type *ShadowTy = getShadowTy(OrigTy);

  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

                                      getPoisonedShadow(AT->getElementType()));

      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));

    Type *ShadowTy = getShadowTy(V);

    return getPoisonedShadow(ShadowTy);
    if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
      return getCleanShadow(V);

      Value *Shadow = ShadowMap[V];

      LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
      assert(Shadow && "No shadow for a value");

      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);

      Value *&ShadowPtr = ShadowMap[V];
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
          LLVM_DEBUG(dbgs() << (FArg.getType()->isScalableTy()
                                    ? "vscale not fully supported\n"
                                    : "Arg is not sized\n"));

          ShadowPtr = getCleanShadow(V);
          setOrigin(A, getCleanOrigin());

        unsigned Size = FArg.hasByValAttr()
                            ? DL.getTypeAllocSize(FArg.getParamByValType())
                            : DL.getTypeAllocSize(FArg.getType());

        if (FArg.hasByValAttr()) {

          const Align ArgAlign = DL.getValueOrABITypeAlignment(
              FArg.getParamAlign(), FArg.getParamByValType());
          Value *CpShadowPtr, *CpOriginPtr;
          std::tie(CpShadowPtr, CpOriginPtr) =
              getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
                                 true);
          if (!PropagateShadow || Overflow) {

            EntryIRB.CreateMemSet(

          } else {
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);

            [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                CpShadowPtr, CopyAlign, Base, CopyAlign, Size);
          }

          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);

            EntryIRB.CreateMemCpy(

        if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
            (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
          ShadowPtr = getCleanShadow(V);
          setOrigin(A, getCleanOrigin());
        } else {
          Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
          ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,

          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
          }
        }

        LLVM_DEBUG(dbgs()
                   << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");
    assert(ShadowPtr && "Could not find shadow for an argument");

        cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
        PoisonUndefVectors) {

      for (unsigned i = 0; i != NumElems; ++i) {

                               : getCleanShadow(Elem);

      LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
                        << *ShadowConstant << "\n");

      return ShadowConstant;

    return getCleanShadow(V);

  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

    if (!MS.TrackOrigins)
      return getCleanOrigin();

           "Unexpected value type in getOrigin()");

    if (I->getMetadata(LLVMContext::MD_nosanitize))
      return getCleanOrigin();

    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");

  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {

      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");

    if (isScalableNonVectorType(ShadowTy)) {
      LLVM_DEBUG(dbgs() << "Skipping check of scalable non-vector " << *Shadow
                        << " before " << *OrigIns << "\n");

           "Can only insert checks for integer, vector, and aggregate shadow "

    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));

  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {

    Value *Shadow, *Origin;

      Shadow = getShadow(Val);

      Origin = getOrigin(Val);

    insertCheckShadow(Shadow, Origin, OrigIns);
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
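  // Both tables serve atomic libcall instrumentation: store-side orderings
  // are bumped to at least release, so the shadow store instrumented before
  // the atomic is published together with the data, and load-side orderings
  // to at least acquire, so the shadow read afterwards observes it. The
  // switch-based helpers above apply the same strengthening to IR-level
  // AtomicOrdering values.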
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))
      return;

    if (isInPrologue(I))
      return;

      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);

    } else {
      setShadow(&I, getCleanShadow(&I));
    }

      insertCheckShadowOf(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow) {

      } else {
        setOrigin(&I, getCleanOrigin());
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);

      insertCheckShadowOf(I.getPointerOperand(), &I);
  }

  void handleCASOrRMW(Instruction &I) {

    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
                                          true)
                           .first;

      insertCheckShadowOf(Addr, &I);

      insertCheckShadowOf(Val, &I);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitAtomicRMWInst(AtomicRMWInst &I) {

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {

  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);

    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);
  }
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {

    if (CI->isMustTailCall())

    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
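  // Bitwise AND: an AND with a definitely-zero bit yields a defined 0 even
  // if the other operand's bit is poisoned, so a plain shadow OR would
  // over-poison. With S* = shadow and V* = value, the rule computed below is
  //   S = (S1 & S2) | (V1 & S2) | (S1 & V2)
  // i.e. a result bit is poisoned only if some poisoned input bit could
  // still have flipped it: 1&p => p, but 0&p => 0.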
  void visitAnd(BinaryOperator &I) {

    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);

    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);
  }
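  // Bitwise OR is the dual case: an OR with a definitely-one bit yields a
  // defined 1, giving S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2). For
  // `or disjoint`, -msan-precise-disjoint-or additionally poisons the result
  // where two possibly-set bits could overlap, since overlap would make the
  // disjointness assumption itself depend on poisoned data.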
  void visitOr(BinaryOperator &I) {

    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);

      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");

    setOriginForNaryOp(I);
  }
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

      if (CombineShadow) {

        OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
        Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
      }

      if (MSV->MS.TrackOrigins) {

        if (!ConstOrigin || !ConstOrigin->isNullValue()) {
          Value *Cond = MSV->convertToBool(OpShadow, IRB);

      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    void Done(Instruction *I) {
      if (CombineShadow) {

        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {

        MSV->setOrigin(I, Origin);
      }
    }

    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
      return;

    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())
      OC.Add(Op.get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(isa<VectorType>(Ty) && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");

    Type *srcTy = V->getType();

    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)

    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())

  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
      SC.Add(Op.get());
    SC.Done(&I);
  }
  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
                          unsigned Shards, Value *VectorA, Value *VectorB) {

    [[maybe_unused]] unsigned TotalNumElems = NumElems;

    assert(NumElems % (ReductionFactor * Shards) == 0);

    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;

      for (unsigned j = 0; j < Shards; j++) {
        unsigned Offset = NumElems / Shards * j;

        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor)

        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor)
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =

    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, 2, Shards,
                                       FirstArgShadow, SecondArgShadow);

    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =

    [[maybe_unused]] FixedVectorType *ReturnType =

    FixedVectorType *ReinterpretShadowTy = nullptr;

    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);

    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
    }

    Value *OrShadow = horizontalReduce(I, 2, Shards,
                                       FirstArgShadow, SecondArgShadow);

    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }
  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
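  // Multiplication by a constant: zero bits of the constant zero out the
  // corresponding result bits regardless of the other operand, so instead
  // of OR-ing shadows the handler below multiplies the shadow by a power of
  // two keeping only the bits the constant can still influence. E.g. for
  // `x * 4` (countr_zero == 2) the shadow of x is shifted left by two, and
  // the two freshly-defined low bits of the result are reported clean.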
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {

      Type *EltTy = VTy->getElementType();

      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =

          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
        } else {
          Elements.push_back(ConstantInt::get(EltTy, 1));
        }

        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
      } else {
        ShadowMul = ConstantInt::get(Ty, 1);
      }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }

  void visitMul(BinaryOperator &I) {

    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
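  // Integer division and remainder: a poisoned divisor can change every
  // result bit (or trap), so it is checked eagerly rather than propagated;
  // the dividend's shadow then passes through unchanged. This mirrors
  // upstream MSan's documented treatment of div/rem.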
  void handleIntegerDiv(Instruction &I) {

    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }

  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
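  // Equality comparisons can be answered even with partially poisoned
  // operands: A == B is decided by C = A ^ B, and any bit of C that is 1
  // while unpoisoned in both inputs already proves inequality. Only when
  // every definitely-defined bit of C is 0 does the poison leave the
  // outcome genuinely unknown, so only then is the result's shadow set.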
  void handleEqualityComparison(ICmpInst &I) {

    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    setOriginForNaryOp(I);
  }
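  // Relational comparisons use interval reasoning: for each operand, treat
  // its poisoned bits as wildcards and form the smallest (V & ~S) and
  // largest (V | S) value it could take, after a sign-bit XOR when the
  // compare is signed. The result is defined iff the comparison comes out
  // the same at the extreme points of both ranges.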
  void handleRelationalComparisonExact(ICmpInst &I) {

    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();

    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {

        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));

      return std::make_pair(Min, Max);
    };

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);

    setOriginForNaryOp(I);
  }

  void handleSignedRelationalComparison(ICmpInst &I) {

      op = I.getOperand(0);
      pre = I.getPredicate();
    } else {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
    }

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(op));
  }

  void visitICmpInst(ICmpInst &I) {

    if (I.isEquality()) {
      handleEqualityComparison(I);

      handleRelationalComparisonExact(I);

      handleSignedRelationalComparison(I);

      handleRelationalComparisonExact(I);

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
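  // Shifts: the first operand's shadow is shifted by the same (concrete)
  // amount, while a poisoned shift amount poisons the whole result —
  // S2Conv below is the usual "sext(S2 != 0)" all-or-nothing conversion
  // of the amount's shadow.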
  void handleShift(BinaryOperator &I) {

    Value *S2 = getShadow(&I, 1);

    Value *V2 = I.getOperand(1);

    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  void handleFunnelShift(IntrinsicInst &I) {

    Value *S0 = getShadow(&I, 0);

    Value *S2 = getShadow(&I, 2);

    Value *V2 = I.getOperand(2);

    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
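  // Plain memcpy/memmove/memset must never run uninstrumented under MSan:
  // they would move data without its shadow. The visitors below therefore
  // route the calls to the runtime's __msan_mem* wrappers, which perform
  // the operation and keep shadow (and origin) for the destination in sync.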
  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1));
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemmoveFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
  }

  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1));
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemcpyFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
  }

  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemsetFn,
                   {I.getArgOperand(0),
                    IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
  }

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins)

  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {

      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
      else
        setOrigin(&I, getCleanOrigin());
  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();

    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();

    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
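  // Heuristic for intrinsics without a dedicated handler: a (ptr, vector)
  // void intrinsic that writes memory is treated like a vector store, a
  // (ptr) -> vector readonly intrinsic like a vector load, and a nomem
  // intrinsic gets plain shadow-OR propagation. Matches are logged when
  // -msan-dump-heuristic-instructions is set.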
  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)
      return false;

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {

      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {

      return handleVectorLoadIntrinsic(I);
    }

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))
        return true;

    return false;
  }

  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {

      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleLifetimeStart(IntrinsicInst &I) {

    LifetimeStartList.push_back(std::make_pair(&I, AI));
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();

    setOrigin(&I, getOrigin(Op));
  }
  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);

    Value *ConcreteZerosCount = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {Src, False});
    Value *ShadowZerosCount = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {SrcShadow, False});

        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    Value *NotAllZeroShadow =

    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");

      OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");

    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  }
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I) {

    Value *S0 = getShadow(&I, 0);

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }

  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {

  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {

    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =

    unsigned FullShadowNumElems =

    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
    } else {

      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());

    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);

    Value *S0 = getShadow(&I, 0);

    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);

    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);
  }
  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||

           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;

      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }

    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

      Value *ResultShadow = getShadow(CopyOp);

      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),

      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), true);

    return CreateShadowCast(IRB, S2, T, true);

    return CreateShadowCast(IRB, S2, T, true);

  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {

    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall(I.getCalledOperand(),
                                  {IRB.CreateBitCast(S1, V1->getType()), V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
    return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                                X86_MMXSizeInBits / EltSizeInBits);
  }

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {

    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());

    Type *T =
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {

    if (MMXEltSizeInBits) {

                              {S1_ext, S2_ext}, nullptr,
                              "_msprop_vector_pack");
    if (MMXEltSizeInBits)

    setOriginForNaryOp(I);
  Constant *createDppMask(unsigned Width, unsigned Mask) {

    const unsigned Width =

    Value *DstMaskV = createDppMask(Width, DstMask);
  void handleDppIntrinsic(IntrinsicInst &I) {

    Value *S0 = getShadow(&I, 0);

    const unsigned Width =

    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);

        SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));

    setOriginForNaryOp(I);
  }

    C = CreateAppToShadowCast(IRB, C);
  void handleBlendvIntrinsic(IntrinsicInst &I) {

    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;

    C = convertBlendvToSelectMask(IRB, C);
    Sc = convertBlendvToSelectMask(IRB, Sc);

    handleSelectLikeInst(I, C, T, F);
  }
  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;

    unsigned ZeroBitsPerResultElement =

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);

    setOriginForNaryOp(I);
  }
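  // Multiply-add (pmadd-style) intrinsics fold ReductionFactor adjacent
  // lanes into one output lane. A poisoned input lane poisons the output
  // lane it feeds, except that multiplying by a definitely-zero lane
  // neutralizes its partner — hence the shadow/value "non-zero" products
  // OR-ed together below before the horizontal reduction.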
  void handleVectorPmaddIntrinsic(IntrinsicInst &I, unsigned ReductionFactor,
                                  unsigned EltSizeInBits = 0) {

    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      Va = I.getOperand(0);
      Vb = I.getOperand(1);

      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {

      Va = I.getOperand(1);
      Vb = I.getOperand(2);

      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);
    }

    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =

      assert(AccumulatorType == ReturnType);
    }

    FixedVectorType *ImplicitReturnType =

    if (EltSizeInBits) {

          getMMXVectorTy(EltSizeInBits * ReductionFactor,

          ReturnType->getNumElements() * ReductionFactor);

    VaInt = CreateAppToShadowCast(IRB, Va);
    VbInt = CreateAppToShadowCast(IRB, Vb);

    And = IRB.CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});

                                  ImplicitReturnType);

    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {

    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);
  }

  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));

    setOriginForNaryOp(I);
  }
  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {

    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));

    setOriginForNaryOp(I);
  }

  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {

    Value *Shadow0 = getShadow(&I, 0);

    setOriginForNaryOp(I);
  }
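  // reduce.or / reduce.and get a more precise rule than a plain shadow OR:
  // for reduce.or, a poisoned lane only matters if the definitely-set bits
  // of the other lanes don't already force the result bit to 1 — hence the
  // "unset or poisoned" / "set or poisoned" terms built below.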
  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {

    Value *OperandShadow = getShadow(&I, 0);

    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);

    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {

    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);

    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleStmxcsr(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    Value *ShadowPtr =
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;

      insertCheckShadowOf(Addr, &I);
  }

  void handleLdmxcsr(IntrinsicInst &I) {

    Value *Addr = I.getArgOperand(0);

    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);

      insertCheckShadowOf(Addr, &I);

    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
                                    : getCleanOrigin();
    insertCheckShadow(Shadow, Origin, &I);
  }
  void handleMaskedExpandLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(0);
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);

    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, false);

                                   getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);

    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedCompressStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    MaybeAlign Align = I.getParamAlign(1);
    Value *Mask = I.getArgOperand(2);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =

    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, true);
  void handleMaskedGather(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    Type *PtrsShadowTy = getShadowTy(Ptrs);

      insertCheckShadowOf(Mask, &I);

      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);

    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);

                                    getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);

    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedScatter(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    Value *Mask = I.getArgOperand(2);

    Type *PtrsShadowTy = getShadowTy(Ptrs);

      insertCheckShadowOf(Mask, &I);

      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =

    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, true);
  void handleMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *V = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    Value *Mask = I.getArgOperand(2);

    Value *Shadow = getShadow(V);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    Value *ShadowPtr;
    Value *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, true);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,

  void handleMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, false);
    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                       getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
      return;

    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");

    setOrigin(&I, Origin);
  }
4414 void handleAVXMaskedStore(IntrinsicInst &
I) {
4419 Value *Dst =
I.getArgOperand(0);
4420 assert(Dst->getType()->isPointerTy() &&
"Destination is not a pointer!");
4425 Value *Src =
I.getArgOperand(2);
4430 Value *SrcShadow = getShadow(Src);
4433 insertCheckShadowOf(Dst, &
I);
4434 insertCheckShadowOf(Mask, &
I);
4437 Value *DstShadowPtr;
4438 Value *DstOriginPtr;
4439 std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
4440 Dst, IRB, SrcShadow->
getType(), Alignment,
true);
4442 SmallVector<Value *, 2> ShadowArgs;
4443 ShadowArgs.
append(1, DstShadowPtr);
4444 ShadowArgs.
append(1, Mask);
4455 if (!MS.TrackOrigins)
4459 auto &
DL =
F.getDataLayout();
4460 paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
4461 DL.getTypeStoreSize(SrcShadow->
getType()),
4480 void handleAVXMaskedLoad(IntrinsicInst &
I) {
4485 Value *Src =
I.getArgOperand(0);
4486 assert(Src->getType()->isPointerTy() &&
"Source is not a pointer!");
4494 insertCheckShadowOf(Mask, &
I);
4497 Type *SrcShadowTy = getShadowTy(Src);
4498 Value *SrcShadowPtr, *SrcOriginPtr;
4499 std::tie(SrcShadowPtr, SrcOriginPtr) =
4500 getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment,
false);
4502 SmallVector<Value *, 2> ShadowArgs;
4503 ShadowArgs.
append(1, SrcShadowPtr);
4504 ShadowArgs.
append(1, Mask);
4513 if (!MS.TrackOrigins)
4520 setOrigin(&
I, PtrSrcOrigin);
4529 assert(isFixedIntVector(Idx));
4530 auto IdxVectorSize =
4538 auto *IdxShadow = getShadow(Idx);
4543 insertCheckShadow(Truncated, getOrigin(Idx),
I);
4548 void handleAVXVpermilvar(IntrinsicInst &
I) {
4550 Value *Shadow = getShadow(&
I, 0);
4551 maskedCheckAVXIndexShadow(IRB,
I.getArgOperand(1), &
I);
4555 Shadow = IRB.
CreateBitCast(Shadow,
I.getArgOperand(0)->getType());
4557 {Shadow, I.getArgOperand(1)});
4560 setOriginForNaryOp(
I);
4565 void handleAVXVpermi2var(IntrinsicInst &
I) {
4570 [[maybe_unused]]
auto ArgVectorSize =
4573 ->getNumElements() == ArgVectorSize);
4575 ->getNumElements() == ArgVectorSize);
4576 assert(
I.getArgOperand(0)->getType() ==
I.getArgOperand(2)->getType());
4577 assert(
I.getType() ==
I.getArgOperand(0)->getType());
4578 assert(
I.getArgOperand(1)->getType()->isIntOrIntVectorTy());
4580 Value *AShadow = getShadow(&
I, 0);
4581 Value *Idx =
I.getArgOperand(1);
4582 Value *BShadow = getShadow(&
I, 2);
4584 maskedCheckAVXIndexShadow(IRB, Idx, &
I);
4588 AShadow = IRB.
CreateBitCast(AShadow,
I.getArgOperand(0)->getType());
4589 BShadow = IRB.
CreateBitCast(BShadow,
I.getArgOperand(2)->getType());
4591 {AShadow, Idx, BShadow});
4593 setOriginForNaryOp(
I);
4596 [[maybe_unused]]
static bool isFixedIntVectorTy(
const Type *
T) {
4600 [[maybe_unused]]
static bool isFixedFPVectorTy(
const Type *
T) {
4604 [[maybe_unused]]
static bool isFixedIntVector(
const Value *V) {
4605 return isFixedIntVectorTy(
V->getType());
4608 [[maybe_unused]]
static bool isFixedFPVector(
const Value *V) {
4609 return isFixedFPVectorTy(
V->getType());
4631 void handleAVX512VectorConvertFPToInt(IntrinsicInst &
I,
bool LastMask) {
4636 Value *WriteThrough;
4640 WriteThrough =
I.getOperand(2);
4641 Mask =
I.getOperand(3);
4644 WriteThrough =
I.getOperand(1);
4645 Mask =
I.getOperand(2);
4650 assert(isFixedIntVector(WriteThrough));
4652 unsigned ANumElements =
4654 [[maybe_unused]]
unsigned WriteThruNumElements =
4656 assert(ANumElements == WriteThruNumElements ||
4657 ANumElements * 2 == WriteThruNumElements);
4660 unsigned MaskNumElements =
Mask->getType()->getScalarSizeInBits();
4661 assert(ANumElements == MaskNumElements ||
4662 ANumElements * 2 == MaskNumElements);
4664 assert(WriteThruNumElements == MaskNumElements);
4668 insertCheckShadowOf(Mask, &
I);
4678 Value *AShadow = getShadow(
A);
4679 AShadow = maybeExtendVectorShadowWithZeros(AShadow,
I);
4681 if (ANumElements * 2 == MaskNumElements) {
4693 "_ms_mask_bitcast");
4703 getShadowTy(&
I),
"_ms_a_shadow");
4705 Value *WriteThroughShadow = getShadow(WriteThrough);
4707 "_ms_writethru_select");
4709 setShadow(&
I, Shadow);
4710 setOriginForNaryOp(
I);
4718 void handleBmiIntrinsic(IntrinsicInst &
I) {
4720 Type *ShadowTy = getShadowTy(&
I);
4723 Value *SMask = getShadow(&
I, 1);
4728 {getShadow(&I, 0), I.getOperand(1)});
4731 setOriginForNaryOp(
I);
4734 static SmallVector<int, 8> getPclmulMask(
unsigned Width,
bool OddElements) {
4735 SmallVector<int, 8>
Mask;
4736 for (
unsigned X = OddElements ? 1 : 0;
X < Width;
X += 2) {
4750 void handlePclmulIntrinsic(IntrinsicInst &
I) {
4755 "pclmul 3rd operand must be a constant");
4758 getPclmulMask(Width, Imm & 0x01));
4760 getPclmulMask(Width, Imm & 0x10));
4761 ShadowAndOriginCombiner SOC(
this, IRB);
4762 SOC.Add(Shuf0, getOrigin(&
I, 0));
4763 SOC.Add(Shuf1, getOrigin(&
I, 1));
4768 void handleUnarySdSsIntrinsic(IntrinsicInst &
I) {
4773 Value *Second = getShadow(&
I, 1);
4775 SmallVector<int, 16>
Mask;
4776 Mask.push_back(Width);
4777 for (
unsigned i = 1; i < Width; i++)
4781 setShadow(&
I, Shadow);
4782 setOriginForNaryOp(
I);
4785 void handleVtestIntrinsic(IntrinsicInst &
I) {
4787 Value *Shadow0 = getShadow(&
I, 0);
4788 Value *Shadow1 = getShadow(&
I, 1);
4794 setShadow(&
I, Shadow);
4795 setOriginForNaryOp(
I);
4798 void handleBinarySdSsIntrinsic(IntrinsicInst &
I) {
4803 Value *Second = getShadow(&
I, 1);
4806 SmallVector<int, 16>
Mask;
4807 Mask.push_back(Width);
4808 for (
unsigned i = 1; i < Width; i++)
4812 setShadow(&
I, Shadow);
4813 setOriginForNaryOp(
I);
4819 void handleRoundPdPsIntrinsic(IntrinsicInst &
I) {
4820 assert(
I.getArgOperand(0)->getType() ==
I.getType());
4825 ShadowAndOriginCombiner SC(
this, IRB);
4826 SC.Add(
I.getArgOperand(0));
4834 void handleAbsIntrinsic(IntrinsicInst &
I) {
4836 Value *Src =
I.getArgOperand(0);
4837 Value *IsIntMinPoison =
I.getArgOperand(1);
4839 assert(
I.getType()->isIntOrIntVectorTy());
4841 assert(Src->getType() ==
I.getType());
4847 Value *SrcShadow = getShadow(Src);
4851 Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
4854 Value *PoisonedShadow = getPoisonedShadow(Src);
4855 Value *PoisonedIfIntMinShadow =
4858 IRB.
CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);
4860 setShadow(&
I, Shadow);
4861 setOrigin(&
I, getOrigin(&
I, 0));
4864 void handleIsFpClass(IntrinsicInst &
I) {
4866 Value *Shadow = getShadow(&
I, 0);
4867 setShadow(&
I, IRB.
CreateICmpNE(Shadow, getCleanShadow(Shadow)));
4868 setOrigin(&
I, getOrigin(&
I, 0));
4871 void handleArithmeticWithOverflow(IntrinsicInst &
I) {
4873 Value *Shadow0 = getShadow(&
I, 0);
4874 Value *Shadow1 = getShadow(&
I, 1);
4877 IRB.
CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
4883 setShadow(&
I, Shadow);
4884 setOriginForNaryOp(
I);
4890 Value *Shadow = getShadow(V);
4912 void handleAVX512VectorDownConvert(IntrinsicInst &
I) {
4917 Value *WriteThrough =
I.getOperand(1);
4921 assert(isFixedIntVector(WriteThrough));
4923 unsigned ANumElements =
4925 unsigned OutputNumElements =
4927 assert(ANumElements == OutputNumElements ||
4928 ANumElements * 2 == OutputNumElements);
4931 assert(
Mask->getType()->getScalarSizeInBits() == ANumElements);
4932 insertCheckShadowOf(Mask, &
I);
4943 if (ANumElements != OutputNumElements) {
4945 Mask = IRB.
CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
4952 Value *AShadow = getShadow(
A);
4956 VectorType *ShadowType = maybeShrinkVectorShadowType(
A,
I);
4966 AShadow = IRB.
CreateTrunc(AShadow, ShadowType,
"_ms_trunc_shadow");
4967 AShadow = maybeExtendVectorShadowWithZeros(AShadow,
I);
4969 Value *WriteThroughShadow = getShadow(WriteThrough);
4972 setShadow(&
I, Shadow);
4973 setOriginForNaryOp(
I);
5000 void handleAVX512VectorGenericMaskedFP(IntrinsicInst &
I,
unsigned AIndex,
5001 unsigned WriteThruIndex,
5002 unsigned MaskIndex) {
5005 unsigned NumArgs =
I.arg_size();
5006 assert(AIndex < NumArgs);
5007 assert(WriteThruIndex < NumArgs);
5008 assert(MaskIndex < NumArgs);
5009 assert(AIndex != WriteThruIndex);
5010 assert(AIndex != MaskIndex);
5011 assert(WriteThruIndex != MaskIndex);
5013 Value *
A =
I.getOperand(AIndex);
5014 Value *WriteThru =
I.getOperand(WriteThruIndex);
5018 assert(isFixedFPVector(WriteThru));
5020 [[maybe_unused]]
unsigned ANumElements =
5022 unsigned OutputNumElements =
5024 assert(ANumElements == OutputNumElements);
5026 for (
unsigned i = 0; i < NumArgs; ++i) {
5027 if (i != AIndex && i != WriteThruIndex) {
5030 assert(
I.getOperand(i)->getType()->isIntegerTy());
5031 insertCheckShadowOf(
I.getOperand(i), &
I);
5036 if (
Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
5038 assert(
Mask->getType()->getScalarSizeInBits() == ANumElements);
5045 Value *AShadow = getShadow(
A);
5051 Value *WriteThruShadow = getShadow(WriteThru);
5054 setShadow(&
I, Shadow);
5056 setOriginForNaryOp(
I);
5066 void visitGenericScalarHalfwordInst(IntrinsicInst &
I) {
5072 Value *WriteThrough =
I.getOperand(2);
5079 insertCheckShadowOf(Mask, &
I);
5083 unsigned NumElements =
5085 assert(NumElements == 8);
5086 assert(
A->getType() ==
B->getType());
5088 assert(
Mask->getType()->getPrimitiveSizeInBits() == NumElements);
5091 Value *ALowerShadow = extractLowerShadow(IRB,
A);
5092 Value *BLowerShadow = extractLowerShadow(IRB,
B);
5094 Value *ABLowerShadow = IRB.
CreateOr(ALowerShadow, BLowerShadow);
5096 Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);
5103 Value *AShadow = getShadow(
A);
5104 Value *DstLowerShadow =
5105 IRB.
CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
5107 AShadow, DstLowerShadow, ConstantInt::get(IRB.
getInt32Ty(), 0),
5110 setShadow(&
I, DstShadow);
5111 setOriginForNaryOp(
I);
5141 void handleAVXGF2P8Affine(IntrinsicInst &
I) {
5152 ->getScalarSizeInBits() == 8);
5154 assert(
A->getType() ==
X->getType());
5156 assert(
B->getType()->isIntegerTy());
5157 assert(
B->getType()->getScalarSizeInBits() == 8);
5159 assert(
I.getType() ==
A->getType());
5161 Value *AShadow = getShadow(
A);
5162 Value *XShadow = getShadow(
X);
5163 Value *BZeroShadow = getCleanShadow(
B);
5166 I.getType(),
I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
5168 {X, AShadow, BZeroShadow});
5170 {XShadow, A, BZeroShadow});
5173 Value *BShadow = getShadow(
B);
5174 Value *BBroadcastShadow = getCleanShadow(AShadow);
5179 for (
unsigned i = 0; i < NumElements; i++)
5183 {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
5184 setOriginForNaryOp(
I);
5198 void handleNEONVectorLoad(IntrinsicInst &
I,
bool WithLane) {
5199 unsigned int numArgs =
I.arg_size();
5202 assert(
I.getType()->isStructTy());
5212 assert(4 <= numArgs && numArgs <= 6);
5226 for (
unsigned int i = 0; i < numArgs - 2; i++)
5227 ShadowArgs.
push_back(getShadow(
I.getArgOperand(i)));
5230 Value *LaneNumber =
I.getArgOperand(numArgs - 2);
5234 insertCheckShadowOf(LaneNumber, &
I);
5237 Value *Src =
I.getArgOperand(numArgs - 1);
5238 assert(Src->getType()->isPointerTy() &&
"Source is not a pointer!");
5240 Type *SrcShadowTy = getShadowTy(Src);
5241 auto [SrcShadowPtr, SrcOriginPtr] =
5242 getShadowOriginPtr(Src, IRB, SrcShadowTy,
Align(1),
false);
5252 if (!MS.TrackOrigins)
5256 setOrigin(&
I, PtrSrcOrigin);
5273 void handleNEONVectorStoreIntrinsic(IntrinsicInst &
I,
bool useLane) {
5277 int numArgOperands =
I.arg_size();
5280 assert(numArgOperands >= 1);
5281 Value *Addr =
I.getArgOperand(numArgOperands - 1);
5283 int skipTrailingOperands = 1;
5286 insertCheckShadowOf(Addr, &
I);
5290 skipTrailingOperands++;
5291 assert(numArgOperands >=
static_cast<int>(skipTrailingOperands));
5293 I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
5296 SmallVector<Value *, 8> ShadowArgs;
5298 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
5300 Value *Shadow = getShadow(&
I, i);
5301 ShadowArgs.
append(1, Shadow);
5318 (numArgOperands - skipTrailingOperands));
5319 Type *OutputShadowTy = getShadowTy(OutputVectorTy);
5323 I.getArgOperand(numArgOperands - skipTrailingOperands));
5325 Value *OutputShadowPtr, *OutputOriginPtr;
5327 std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
5328 Addr, IRB, OutputShadowTy,
Align(1),
true);
5329 ShadowArgs.
append(1, OutputShadowPtr);
5335 if (MS.TrackOrigins) {
5343 OriginCombiner OC(
this, IRB);
5344 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++)
5345 OC.Add(
I.getArgOperand(i));
5347 const DataLayout &
DL =
F.getDataLayout();
5348 OC.DoneAndStoreOrigin(
DL.getTypeStoreSize(OutputVectorTy),
5375 void handleIntrinsicByApplyingToShadow(IntrinsicInst &
I,
5377 unsigned int trailingVerbatimArgs) {
5380 assert(trailingVerbatimArgs <
I.arg_size());
5382 SmallVector<Value *, 8> ShadowArgs;
5384 for (
unsigned int i = 0; i <
I.arg_size() - trailingVerbatimArgs; i++) {
5385 Value *Shadow = getShadow(&
I, i);
5393 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
5395 Value *Arg =
I.getArgOperand(i);
5401 Value *CombinedShadow = CI;
5404 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
5407 CreateShadowCast(IRB, getShadow(&
I, i), CombinedShadow->
getType());
5408 CombinedShadow = IRB.
CreateOr(Shadow, CombinedShadow,
"_msprop");
5413 setOriginForNaryOp(
I);
5419 void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &
I) {
5425 bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &
I) {
5426 switch (
I.getIntrinsicID()) {
5427 case Intrinsic::uadd_with_overflow:
5428 case Intrinsic::sadd_with_overflow:
5429 case Intrinsic::usub_with_overflow:
5430 case Intrinsic::ssub_with_overflow:
5431 case Intrinsic::umul_with_overflow:
5432 case Intrinsic::smul_with_overflow:
5433 handleArithmeticWithOverflow(
I);
5435 case Intrinsic::abs:
5436 handleAbsIntrinsic(
I);
5438 case Intrinsic::bitreverse:
5439 handleIntrinsicByApplyingToShadow(
I,
I.getIntrinsicID(),
5442 case Intrinsic::is_fpclass:
5445 case Intrinsic::lifetime_start:
5446 handleLifetimeStart(
I);
5448 case Intrinsic::launder_invariant_group:
5449 case Intrinsic::strip_invariant_group:
5450 handleInvariantGroup(
I);
5452 case Intrinsic::bswap:
5455 case Intrinsic::ctlz:
5456 case Intrinsic::cttz:
5457 handleCountLeadingTrailingZeros(
I);
5459 case Intrinsic::masked_compressstore:
5460 handleMaskedCompressStore(
I);
5462 case Intrinsic::masked_expandload:
5463 handleMaskedExpandLoad(
I);
5465 case Intrinsic::masked_gather:
5466 handleMaskedGather(
I);
5468 case Intrinsic::masked_scatter:
5469 handleMaskedScatter(
I);
5471 case Intrinsic::masked_store:
5472 handleMaskedStore(
I);
5474 case Intrinsic::masked_load:
5475 handleMaskedLoad(
I);
5477 case Intrinsic::vector_reduce_and:
5478 handleVectorReduceAndIntrinsic(
I);
5480 case Intrinsic::vector_reduce_or:
5481 handleVectorReduceOrIntrinsic(
I);
5484 case Intrinsic::vector_reduce_add:
5485 case Intrinsic::vector_reduce_xor:
5486 case Intrinsic::vector_reduce_mul:
5489 case Intrinsic::vector_reduce_smax:
5490 case Intrinsic::vector_reduce_smin:
5491 case Intrinsic::vector_reduce_umax:
5492 case Intrinsic::vector_reduce_umin:
5495 case Intrinsic::vector_reduce_fmax:
5496 case Intrinsic::vector_reduce_fmin:
5497 handleVectorReduceIntrinsic(
I,
false);
5500 case Intrinsic::vector_reduce_fadd:
5501 case Intrinsic::vector_reduce_fmul:
5502 handleVectorReduceWithStarterIntrinsic(
I);
5505 case Intrinsic::scmp:
5506 case Intrinsic::ucmp: {
5511 case Intrinsic::fshl:
5512 case Intrinsic::fshr:
5513 handleFunnelShift(
I);
5516 case Intrinsic::is_constant:
5518 setShadow(&
I, getCleanShadow(&
I));
5519 setOrigin(&
I, getCleanOrigin());
5529 bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &
I) {
5530 switch (
I.getIntrinsicID()) {
5531 case Intrinsic::x86_sse_stmxcsr:
5534 case Intrinsic::x86_sse_ldmxcsr:
5541 case Intrinsic::x86_avx512_vcvtsd2usi64:
5542 case Intrinsic::x86_avx512_vcvtsd2usi32:
5543 case Intrinsic::x86_avx512_vcvtss2usi64:
5544 case Intrinsic::x86_avx512_vcvtss2usi32:
5545 case Intrinsic::x86_avx512_cvttss2usi64:
5546 case Intrinsic::x86_avx512_cvttss2usi:
5547 case Intrinsic::x86_avx512_cvttsd2usi64:
5548 case Intrinsic::x86_avx512_cvttsd2usi:
5549 case Intrinsic::x86_avx512_cvtusi2ss:
5550 case Intrinsic::x86_avx512_cvtusi642sd:
5551 case Intrinsic::x86_avx512_cvtusi642ss:
5552 handleSSEVectorConvertIntrinsic(
I, 1,
true);
5554 case Intrinsic::x86_sse2_cvtsd2si64:
5555 case Intrinsic::x86_sse2_cvtsd2si:
5556 case Intrinsic::x86_sse2_cvtsd2ss:
5557 case Intrinsic::x86_sse2_cvttsd2si64:
5558 case Intrinsic::x86_sse2_cvttsd2si:
5559 case Intrinsic::x86_sse_cvtss2si64:
5560 case Intrinsic::x86_sse_cvtss2si:
5561 case Intrinsic::x86_sse_cvttss2si64:
5562 case Intrinsic::x86_sse_cvttss2si:
5563 handleSSEVectorConvertIntrinsic(
I, 1);
5565 case Intrinsic::x86_sse_cvtps2pi:
5566 case Intrinsic::x86_sse_cvttps2pi:
5567 handleSSEVectorConvertIntrinsic(
I, 2);
5575 case Intrinsic::x86_vcvtps2ph_128:
5576 case Intrinsic::x86_vcvtps2ph_256: {
5577 handleSSEVectorConvertIntrinsicByProp(
I,
true);
5586 case Intrinsic::x86_avx512_mask_cvtps2dq_512:
5587 handleAVX512VectorConvertFPToInt(
I,
false);
5592 case Intrinsic::x86_sse2_cvtpd2ps:
5593 case Intrinsic::x86_sse2_cvtps2dq:
5594 case Intrinsic::x86_sse2_cvtpd2dq:
5595 case Intrinsic::x86_sse2_cvttps2dq:
5596 case Intrinsic::x86_sse2_cvttpd2dq:
5597 case Intrinsic::x86_avx_cvt_pd2_ps_256:
5598 case Intrinsic::x86_avx_cvt_ps2dq_256:
5599 case Intrinsic::x86_avx_cvt_pd2dq_256:
5600 case Intrinsic::x86_avx_cvtt_ps2dq_256:
5601 case Intrinsic::x86_avx_cvtt_pd2dq_256: {
5602 handleSSEVectorConvertIntrinsicByProp(
I,
false);
5613 case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
5614 case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
5615 case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
5616 handleAVX512VectorConvertFPToInt(
I,
true);
5620 case Intrinsic::x86_avx512_psll_w_512:
5621 case Intrinsic::x86_avx512_psll_d_512:
5622 case Intrinsic::x86_avx512_psll_q_512:
5623 case Intrinsic::x86_avx512_pslli_w_512:
5624 case Intrinsic::x86_avx512_pslli_d_512:
5625 case Intrinsic::x86_avx512_pslli_q_512:
5626 case Intrinsic::x86_avx512_psrl_w_512:
5627 case Intrinsic::x86_avx512_psrl_d_512:
5628 case Intrinsic::x86_avx512_psrl_q_512:
5629 case Intrinsic::x86_avx512_psra_w_512:
5630 case Intrinsic::x86_avx512_psra_d_512:
5631 case Intrinsic::x86_avx512_psra_q_512:
5632 case Intrinsic::x86_avx512_psrli_w_512:
5633 case Intrinsic::x86_avx512_psrli_d_512:
5634 case Intrinsic::x86_avx512_psrli_q_512:
5635 case Intrinsic::x86_avx512_psrai_w_512:
5636 case Intrinsic::x86_avx512_psrai_d_512:
5637 case Intrinsic::x86_avx512_psrai_q_512:
5638 case Intrinsic::x86_avx512_psra_q_256:
5639 case Intrinsic::x86_avx512_psra_q_128:
5640 case Intrinsic::x86_avx512_psrai_q_256:
5641 case Intrinsic::x86_avx512_psrai_q_128:
5642 case Intrinsic::x86_avx2_psll_w:
5643 case Intrinsic::x86_avx2_psll_d:
5644 case Intrinsic::x86_avx2_psll_q:
5645 case Intrinsic::x86_avx2_pslli_w:
5646 case Intrinsic::x86_avx2_pslli_d:
5647 case Intrinsic::x86_avx2_pslli_q:
5648 case Intrinsic::x86_avx2_psrl_w:
5649 case Intrinsic::x86_avx2_psrl_d:
5650 case Intrinsic::x86_avx2_psrl_q:
5651 case Intrinsic::x86_avx2_psra_w:
5652 case Intrinsic::x86_avx2_psra_d:
5653 case Intrinsic::x86_avx2_psrli_w:
5654 case Intrinsic::x86_avx2_psrli_d:
5655 case Intrinsic::x86_avx2_psrli_q:
5656 case Intrinsic::x86_avx2_psrai_w:
5657 case Intrinsic::x86_avx2_psrai_d:
5658 case Intrinsic::x86_sse2_psll_w:
5659 case Intrinsic::x86_sse2_psll_d:
5660 case Intrinsic::x86_sse2_psll_q:
5661 case Intrinsic::x86_sse2_pslli_w:
5662 case Intrinsic::x86_sse2_pslli_d:
5663 case Intrinsic::x86_sse2_pslli_q:
5664 case Intrinsic::x86_sse2_psrl_w:
5665 case Intrinsic::x86_sse2_psrl_d:
5666 case Intrinsic::x86_sse2_psrl_q:
5667 case Intrinsic::x86_sse2_psra_w:
5668 case Intrinsic::x86_sse2_psra_d:
5669 case Intrinsic::x86_sse2_psrli_w:
5670 case Intrinsic::x86_sse2_psrli_d:
5671 case Intrinsic::x86_sse2_psrli_q:
5672 case Intrinsic::x86_sse2_psrai_w:
5673 case Intrinsic::x86_sse2_psrai_d:
5674 case Intrinsic::x86_mmx_psll_w:
5675 case Intrinsic::x86_mmx_psll_d:
5676 case Intrinsic::x86_mmx_psll_q:
5677 case Intrinsic::x86_mmx_pslli_w:
5678 case Intrinsic::x86_mmx_pslli_d:
5679 case Intrinsic::x86_mmx_pslli_q:
5680 case Intrinsic::x86_mmx_psrl_w:
5681 case Intrinsic::x86_mmx_psrl_d:
5682 case Intrinsic::x86_mmx_psrl_q:
5683 case Intrinsic::x86_mmx_psra_w:
5684 case Intrinsic::x86_mmx_psra_d:
5685 case Intrinsic::x86_mmx_psrli_w:
5686 case Intrinsic::x86_mmx_psrli_d:
5687 case Intrinsic::x86_mmx_psrli_q:
5688 case Intrinsic::x86_mmx_psrai_w:
5689 case Intrinsic::x86_mmx_psrai_d:
5690 handleVectorShiftIntrinsic(
I,
false);
5692 case Intrinsic::x86_avx2_psllv_d:
5693 case Intrinsic::x86_avx2_psllv_d_256:
5694 case Intrinsic::x86_avx512_psllv_d_512:
5695 case Intrinsic::x86_avx2_psllv_q:
5696 case Intrinsic::x86_avx2_psllv_q_256:
5697 case Intrinsic::x86_avx512_psllv_q_512:
5698 case Intrinsic::x86_avx2_psrlv_d:
5699 case Intrinsic::x86_avx2_psrlv_d_256:
5700 case Intrinsic::x86_avx512_psrlv_d_512:
5701 case Intrinsic::x86_avx2_psrlv_q:
5702 case Intrinsic::x86_avx2_psrlv_q_256:
5703 case Intrinsic::x86_avx512_psrlv_q_512:
5704 case Intrinsic::x86_avx2_psrav_d:
5705 case Intrinsic::x86_avx2_psrav_d_256:
5706 case Intrinsic::x86_avx512_psrav_d_512:
5707 case Intrinsic::x86_avx512_psrav_q_128:
5708 case Intrinsic::x86_avx512_psrav_q_256:
5709 case Intrinsic::x86_avx512_psrav_q_512:
5710 handleVectorShiftIntrinsic(
I,
true);
5714 case Intrinsic::x86_sse2_packsswb_128:
5715 case Intrinsic::x86_sse2_packssdw_128:
5716 case Intrinsic::x86_sse2_packuswb_128:
5717 case Intrinsic::x86_sse41_packusdw:
5718 case Intrinsic::x86_avx2_packsswb:
5719 case Intrinsic::x86_avx2_packssdw:
5720 case Intrinsic::x86_avx2_packuswb:
5721 case Intrinsic::x86_avx2_packusdw:
5727 case Intrinsic::x86_avx512_packsswb_512:
5728 case Intrinsic::x86_avx512_packssdw_512:
5729 case Intrinsic::x86_avx512_packuswb_512:
5730 case Intrinsic::x86_avx512_packusdw_512:
5731 handleVectorPackIntrinsic(
I);
5734 case Intrinsic::x86_sse41_pblendvb:
5735 case Intrinsic::x86_sse41_blendvpd:
5736 case Intrinsic::x86_sse41_blendvps:
5737 case Intrinsic::x86_avx_blendv_pd_256:
5738 case Intrinsic::x86_avx_blendv_ps_256:
5739 case Intrinsic::x86_avx2_pblendvb:
5740 handleBlendvIntrinsic(
I);
5743 case Intrinsic::x86_avx_dp_ps_256:
5744 case Intrinsic::x86_sse41_dppd:
5745 case Intrinsic::x86_sse41_dpps:
5746 handleDppIntrinsic(
I);
5749 case Intrinsic::x86_mmx_packsswb:
5750 case Intrinsic::x86_mmx_packuswb:
5751 handleVectorPackIntrinsic(
I, 16);
5754 case Intrinsic::x86_mmx_packssdw:
5755 handleVectorPackIntrinsic(
I, 32);
5758 case Intrinsic::x86_mmx_psad_bw:
5759 handleVectorSadIntrinsic(
I,
true);
5761 case Intrinsic::x86_sse2_psad_bw:
5762 case Intrinsic::x86_avx2_psad_bw:
5763 handleVectorSadIntrinsic(
I);
5789 case Intrinsic::x86_sse2_pmadd_wd:
5790 case Intrinsic::x86_avx2_pmadd_wd:
5791 case Intrinsic::x86_avx512_pmaddw_d_512:
5792 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
5793 case Intrinsic::x86_avx2_pmadd_ub_sw:
5794 case Intrinsic::x86_avx512_pmaddubs_w_512:
5795 handleVectorPmaddIntrinsic(
I, 2,
5800 case Intrinsic::x86_ssse3_pmadd_ub_sw:
5801 handleVectorPmaddIntrinsic(
I, 2,
5806 case Intrinsic::x86_mmx_pmadd_wd:
5807 handleVectorPmaddIntrinsic(
I, 2,
5903 case Intrinsic::x86_avx512_vpdpbusd_128:
5904 case Intrinsic::x86_avx512_vpdpbusd_256:
5905 case Intrinsic::x86_avx512_vpdpbusd_512:
5906 case Intrinsic::x86_avx512_vpdpbusds_128:
5907 case Intrinsic::x86_avx512_vpdpbusds_256:
5908 case Intrinsic::x86_avx512_vpdpbusds_512:
5909 case Intrinsic::x86_avx2_vpdpbssd_128:
5910 case Intrinsic::x86_avx2_vpdpbssd_256:
5911 case Intrinsic::x86_avx10_vpdpbssd_512:
5912 case Intrinsic::x86_avx2_vpdpbssds_128:
5913 case Intrinsic::x86_avx2_vpdpbssds_256:
5914 case Intrinsic::x86_avx10_vpdpbssds_512:
5915 case Intrinsic::x86_avx2_vpdpbsud_128:
5916 case Intrinsic::x86_avx2_vpdpbsud_256:
5917 case Intrinsic::x86_avx10_vpdpbsud_512:
5918 case Intrinsic::x86_avx2_vpdpbsuds_128:
5919 case Intrinsic::x86_avx2_vpdpbsuds_256:
5920 case Intrinsic::x86_avx10_vpdpbsuds_512:
5921 case Intrinsic::x86_avx2_vpdpbuud_128:
5922 case Intrinsic::x86_avx2_vpdpbuud_256:
5923 case Intrinsic::x86_avx10_vpdpbuud_512:
5924 case Intrinsic::x86_avx2_vpdpbuuds_128:
5925 case Intrinsic::x86_avx2_vpdpbuuds_256:
5926 case Intrinsic::x86_avx10_vpdpbuuds_512:
5927 handleVectorPmaddIntrinsic(
I, 4,
6023 case Intrinsic::x86_avx512_vpdpwssd_128:
6024 case Intrinsic::x86_avx512_vpdpwssd_256:
6025 case Intrinsic::x86_avx512_vpdpwssd_512:
6026 case Intrinsic::x86_avx512_vpdpwssds_128:
6027 case Intrinsic::x86_avx512_vpdpwssds_256:
6028 case Intrinsic::x86_avx512_vpdpwssds_512:
6029 case Intrinsic::x86_avx2_vpdpwsud_128:
6030 case Intrinsic::x86_avx2_vpdpwsud_256:
6031 case Intrinsic::x86_avx10_vpdpwsud_512:
6032 case Intrinsic::x86_avx2_vpdpwsuds_128:
6033 case Intrinsic::x86_avx2_vpdpwsuds_256:
6034 case Intrinsic::x86_avx10_vpdpwsuds_512:
6035 case Intrinsic::x86_avx2_vpdpwusd_128:
6036 case Intrinsic::x86_avx2_vpdpwusd_256:
6037 case Intrinsic::x86_avx10_vpdpwusd_512:
6038 case Intrinsic::x86_avx2_vpdpwusds_128:
6039 case Intrinsic::x86_avx2_vpdpwusds_256:
6040 case Intrinsic::x86_avx10_vpdpwusds_512:
6041 case Intrinsic::x86_avx2_vpdpwuud_128:
6042 case Intrinsic::x86_avx2_vpdpwuud_256:
6043 case Intrinsic::x86_avx10_vpdpwuud_512:
6044 case Intrinsic::x86_avx2_vpdpwuuds_128:
6045 case Intrinsic::x86_avx2_vpdpwuuds_256:
6046 case Intrinsic::x86_avx10_vpdpwuuds_512:
6047 handleVectorPmaddIntrinsic(
I, 2,
6059 case Intrinsic::x86_avx512bf16_dpbf16ps_128:
6060 case Intrinsic::x86_avx512bf16_dpbf16ps_256:
6061 case Intrinsic::x86_avx512bf16_dpbf16ps_512:
6062 handleVectorPmaddIntrinsic(
I, 2,
6066 case Intrinsic::x86_sse_cmp_ss:
6067 case Intrinsic::x86_sse2_cmp_sd:
6068 case Intrinsic::x86_sse_comieq_ss:
6069 case Intrinsic::x86_sse_comilt_ss:
6070 case Intrinsic::x86_sse_comile_ss:
6071 case Intrinsic::x86_sse_comigt_ss:
6072 case Intrinsic::x86_sse_comige_ss:
6073 case Intrinsic::x86_sse_comineq_ss:
6074 case Intrinsic::x86_sse_ucomieq_ss:
6075 case Intrinsic::x86_sse_ucomilt_ss:
6076 case Intrinsic::x86_sse_ucomile_ss:
6077 case Intrinsic::x86_sse_ucomigt_ss:
6078 case Intrinsic::x86_sse_ucomige_ss:
6079 case Intrinsic::x86_sse_ucomineq_ss:
6080 case Intrinsic::x86_sse2_comieq_sd:
6081 case Intrinsic::x86_sse2_comilt_sd:
6082 case Intrinsic::x86_sse2_comile_sd:
6083 case Intrinsic::x86_sse2_comigt_sd:
6084 case Intrinsic::x86_sse2_comige_sd:
6085 case Intrinsic::x86_sse2_comineq_sd:
6086 case Intrinsic::x86_sse2_ucomieq_sd:
6087 case Intrinsic::x86_sse2_ucomilt_sd:
6088 case Intrinsic::x86_sse2_ucomile_sd:
6089 case Intrinsic::x86_sse2_ucomigt_sd:
6090 case Intrinsic::x86_sse2_ucomige_sd:
6091 case Intrinsic::x86_sse2_ucomineq_sd:
6092 handleVectorCompareScalarIntrinsic(
I);
6095 case Intrinsic::x86_avx_cmp_pd_256:
6096 case Intrinsic::x86_avx_cmp_ps_256:
6097 case Intrinsic::x86_sse2_cmp_pd:
6098 case Intrinsic::x86_sse_cmp_ps:
6099 handleVectorComparePackedIntrinsic(
I);
6102 case Intrinsic::x86_bmi_bextr_32:
6103 case Intrinsic::x86_bmi_bextr_64:
6104 case Intrinsic::x86_bmi_bzhi_32:
6105 case Intrinsic::x86_bmi_bzhi_64:
6106 case Intrinsic::x86_bmi_pdep_32:
6107 case Intrinsic::x86_bmi_pdep_64:
6108 case Intrinsic::x86_bmi_pext_32:
6109 case Intrinsic::x86_bmi_pext_64:
6110 handleBmiIntrinsic(
I);
6113 case Intrinsic::x86_pclmulqdq:
6114 case Intrinsic::x86_pclmulqdq_256:
6115 case Intrinsic::x86_pclmulqdq_512:
6116 handlePclmulIntrinsic(
I);
6119 case Intrinsic::x86_avx_round_pd_256:
6120 case Intrinsic::x86_avx_round_ps_256:
6121 case Intrinsic::x86_sse41_round_pd:
6122 case Intrinsic::x86_sse41_round_ps:
6123 handleRoundPdPsIntrinsic(
I);
6126 case Intrinsic::x86_sse41_round_sd:
6127 case Intrinsic::x86_sse41_round_ss:
6128 handleUnarySdSsIntrinsic(
I);
6131 case Intrinsic::x86_sse2_max_sd:
6132 case Intrinsic::x86_sse_max_ss:
6133 case Intrinsic::x86_sse2_min_sd:
6134 case Intrinsic::x86_sse_min_ss:
6135 handleBinarySdSsIntrinsic(
I);
6138 case Intrinsic::x86_avx_vtestc_pd:
6139 case Intrinsic::x86_avx_vtestc_pd_256:
6140 case Intrinsic::x86_avx_vtestc_ps:
6141 case Intrinsic::x86_avx_vtestc_ps_256:
6142 case Intrinsic::x86_avx_vtestnzc_pd:
6143 case Intrinsic::x86_avx_vtestnzc_pd_256:
6144 case Intrinsic::x86_avx_vtestnzc_ps:
6145 case Intrinsic::x86_avx_vtestnzc_ps_256:
6146 case Intrinsic::x86_avx_vtestz_pd:
6147 case Intrinsic::x86_avx_vtestz_pd_256:
6148 case Intrinsic::x86_avx_vtestz_ps:
6149 case Intrinsic::x86_avx_vtestz_ps_256:
6150 case Intrinsic::x86_avx_ptestc_256:
6151 case Intrinsic::x86_avx_ptestnzc_256:
6152 case Intrinsic::x86_avx_ptestz_256:
6153 case Intrinsic::x86_sse41_ptestc:
6154 case Intrinsic::x86_sse41_ptestnzc:
6155 case Intrinsic::x86_sse41_ptestz:
6156 handleVtestIntrinsic(
I);
6160 case Intrinsic::x86_ssse3_phadd_w:
6161 case Intrinsic::x86_ssse3_phadd_w_128:
6162 case Intrinsic::x86_ssse3_phsub_w:
6163 case Intrinsic::x86_ssse3_phsub_w_128:
6164 handlePairwiseShadowOrIntrinsic(
I, 1,
6168 case Intrinsic::x86_avx2_phadd_w:
6169 case Intrinsic::x86_avx2_phsub_w:
6170 handlePairwiseShadowOrIntrinsic(
I, 2,
6175 case Intrinsic::x86_ssse3_phadd_d:
6176 case Intrinsic::x86_ssse3_phadd_d_128:
6177 case Intrinsic::x86_ssse3_phsub_d:
6178 case Intrinsic::x86_ssse3_phsub_d_128:
6179 handlePairwiseShadowOrIntrinsic(
I, 1,
6183 case Intrinsic::x86_avx2_phadd_d:
6184 case Intrinsic::x86_avx2_phsub_d:
6185 handlePairwiseShadowOrIntrinsic(
I, 2,
6190 case Intrinsic::x86_ssse3_phadd_sw:
6191 case Intrinsic::x86_ssse3_phadd_sw_128:
6192 case Intrinsic::x86_ssse3_phsub_sw:
6193 case Intrinsic::x86_ssse3_phsub_sw_128:
6194 handlePairwiseShadowOrIntrinsic(
I, 1,
6198 case Intrinsic::x86_avx2_phadd_sw:
6199 case Intrinsic::x86_avx2_phsub_sw:
6200 handlePairwiseShadowOrIntrinsic(
I, 2,
6205 case Intrinsic::x86_sse3_hadd_ps:
6206 case Intrinsic::x86_sse3_hadd_pd:
6207 case Intrinsic::x86_sse3_hsub_ps:
6208 case Intrinsic::x86_sse3_hsub_pd:
6209 handlePairwiseShadowOrIntrinsic(
I, 1);
6212 case Intrinsic::x86_avx_hadd_pd_256:
6213 case Intrinsic::x86_avx_hadd_ps_256:
6214 case Intrinsic::x86_avx_hsub_pd_256:
6215 case Intrinsic::x86_avx_hsub_ps_256:
6216 handlePairwiseShadowOrIntrinsic(
I, 2);
6219 case Intrinsic::x86_avx_maskstore_ps:
6220 case Intrinsic::x86_avx_maskstore_pd:
6221 case Intrinsic::x86_avx_maskstore_ps_256:
6222 case Intrinsic::x86_avx_maskstore_pd_256:
6223 case Intrinsic::x86_avx2_maskstore_d:
6224 case Intrinsic::x86_avx2_maskstore_q:
6225 case Intrinsic::x86_avx2_maskstore_d_256:
6226 case Intrinsic::x86_avx2_maskstore_q_256: {
6227 handleAVXMaskedStore(
I);
6231 case Intrinsic::x86_avx_maskload_ps:
6232 case Intrinsic::x86_avx_maskload_pd:
6233 case Intrinsic::x86_avx_maskload_ps_256:
6234 case Intrinsic::x86_avx_maskload_pd_256:
6235 case Intrinsic::x86_avx2_maskload_d:
6236 case Intrinsic::x86_avx2_maskload_q:
6237 case Intrinsic::x86_avx2_maskload_d_256:
6238 case Intrinsic::x86_avx2_maskload_q_256: {
6239 handleAVXMaskedLoad(
I);
6244 case Intrinsic::x86_avx512fp16_add_ph_512:
6245 case Intrinsic::x86_avx512fp16_sub_ph_512:
6246 case Intrinsic::x86_avx512fp16_mul_ph_512:
6247 case Intrinsic::x86_avx512fp16_div_ph_512:
6248 case Intrinsic::x86_avx512fp16_max_ph_512:
6249 case Intrinsic::x86_avx512fp16_min_ph_512:
6250 case Intrinsic::x86_avx512_min_ps_512:
6251 case Intrinsic::x86_avx512_min_pd_512:
6252 case Intrinsic::x86_avx512_max_ps_512:
6253 case Intrinsic::x86_avx512_max_pd_512: {
6258 [[maybe_unused]]
bool Success =
6259 maybeHandleSimpleNomemIntrinsic(
I, 1);
6264 case Intrinsic::x86_avx_vpermilvar_pd:
6265 case Intrinsic::x86_avx_vpermilvar_pd_256:
6266 case Intrinsic::x86_avx512_vpermilvar_pd_512:
6267 case Intrinsic::x86_avx_vpermilvar_ps:
6268 case Intrinsic::x86_avx_vpermilvar_ps_256:
6269 case Intrinsic::x86_avx512_vpermilvar_ps_512: {
6270 handleAVXVpermilvar(
I);
6274 case Intrinsic::x86_avx512_vpermi2var_d_128:
6275 case Intrinsic::x86_avx512_vpermi2var_d_256:
6276 case Intrinsic::x86_avx512_vpermi2var_d_512:
6277 case Intrinsic::x86_avx512_vpermi2var_hi_128:
6278 case Intrinsic::x86_avx512_vpermi2var_hi_256:
6279 case Intrinsic::x86_avx512_vpermi2var_hi_512:
6280 case Intrinsic::x86_avx512_vpermi2var_pd_128:
6281 case Intrinsic::x86_avx512_vpermi2var_pd_256:
6282 case Intrinsic::x86_avx512_vpermi2var_pd_512:
6283 case Intrinsic::x86_avx512_vpermi2var_ps_128:
6284 case Intrinsic::x86_avx512_vpermi2var_ps_256:
6285 case Intrinsic::x86_avx512_vpermi2var_ps_512:
6286 case Intrinsic::x86_avx512_vpermi2var_q_128:
6287 case Intrinsic::x86_avx512_vpermi2var_q_256:
6288 case Intrinsic::x86_avx512_vpermi2var_q_512:
6289 case Intrinsic::x86_avx512_vpermi2var_qi_128:
6290 case Intrinsic::x86_avx512_vpermi2var_qi_256:
6291 case Intrinsic::x86_avx512_vpermi2var_qi_512:
6292 handleAVXVpermi2var(
I);
6306 case Intrinsic::x86_avx2_pshuf_b:
6307 case Intrinsic::x86_sse_pshuf_w:
6308 case Intrinsic::x86_ssse3_pshuf_b_128:
6309 case Intrinsic::x86_ssse3_pshuf_b:
6310 case Intrinsic::x86_avx512_pshuf_b_512:
6311 handleIntrinsicByApplyingToShadow(
I,
I.getIntrinsicID(),
6317 case Intrinsic::x86_avx512_mask_pmov_dw_512:
6318 case Intrinsic::x86_avx512_mask_pmov_db_512:
6319 case Intrinsic::x86_avx512_mask_pmov_qb_512:
6320 case Intrinsic::x86_avx512_mask_pmov_qw_512: {
6323 handleIntrinsicByApplyingToShadow(
I,
I.getIntrinsicID(),
6331 case Intrinsic::x86_avx512_mask_pmovs_dw_512:
6332 case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
6333 handleIntrinsicByApplyingToShadow(
I,
6334 Intrinsic::x86_avx512_mask_pmov_dw_512,
6339 case Intrinsic::x86_avx512_mask_pmovs_db_512:
6340 case Intrinsic::x86_avx512_mask_pmovus_db_512: {
6341 handleIntrinsicByApplyingToShadow(
I,
6342 Intrinsic::x86_avx512_mask_pmov_db_512,
6347 case Intrinsic::x86_avx512_mask_pmovs_qb_512:
6348 case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
6349 handleIntrinsicByApplyingToShadow(
I,
6350 Intrinsic::x86_avx512_mask_pmov_qb_512,
6355 case Intrinsic::x86_avx512_mask_pmovs_qw_512:
6356 case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
6357 handleIntrinsicByApplyingToShadow(
I,
6358 Intrinsic::x86_avx512_mask_pmov_qw_512,
6363 case Intrinsic::x86_avx512_mask_pmovs_qd_512:
6364 case Intrinsic::x86_avx512_mask_pmovus_qd_512:
6365 case Intrinsic::x86_avx512_mask_pmovs_wb_512:
6366 case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
6370 handleAVX512VectorDownConvert(
I);
6410 case Intrinsic::x86_avx512_rsqrt14_ps_512:
6411 case Intrinsic::x86_avx512_rsqrt14_ps_256:
6412 case Intrinsic::x86_avx512_rsqrt14_ps_128:
6413 case Intrinsic::x86_avx512_rsqrt14_pd_512:
6414 case Intrinsic::x86_avx512_rsqrt14_pd_256:
6415 case Intrinsic::x86_avx512_rsqrt14_pd_128:
6416 case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
6417 case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
6418 case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
6419 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
6420 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
6421 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
6422 handleAVX512VectorGenericMaskedFP(
I, 0, 1,
6462 case Intrinsic::x86_avx512_rcp14_ps_512:
6463 case Intrinsic::x86_avx512_rcp14_ps_256:
6464 case Intrinsic::x86_avx512_rcp14_ps_128:
6465 case Intrinsic::x86_avx512_rcp14_pd_512:
6466 case Intrinsic::x86_avx512_rcp14_pd_256:
6467 case Intrinsic::x86_avx512_rcp14_pd_128:
6468 case Intrinsic::x86_avx10_mask_rcp_bf16_512:
6469 case Intrinsic::x86_avx10_mask_rcp_bf16_256:
6470 case Intrinsic::x86_avx10_mask_rcp_bf16_128:
6471 case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
6472 case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
6473 case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
6474 handleAVX512VectorGenericMaskedFP(
I, 0, 1,
6518 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
6519 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
6520 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
6521 case Intrinsic::x86_avx512_mask_rndscale_ps_512:
6522 case Intrinsic::x86_avx512_mask_rndscale_ps_256:
6523 case Intrinsic::x86_avx512_mask_rndscale_ps_128:
6524 case Intrinsic::x86_avx512_mask_rndscale_pd_512:
6525 case Intrinsic::x86_avx512_mask_rndscale_pd_256:
6526 case Intrinsic::x86_avx512_mask_rndscale_pd_128:
6527 case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
6528 case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
6529 case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
6530 handleAVX512VectorGenericMaskedFP(
I, 0, 2,
6535 case Intrinsic::x86_avx512fp16_mask_add_sh_round:
6536 case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
6537 case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
6538 case Intrinsic::x86_avx512fp16_mask_div_sh_round:
6539 case Intrinsic::x86_avx512fp16_mask_max_sh_round:
6540 case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
6541 visitGenericScalarHalfwordInst(
I);
6546 case Intrinsic::x86_vgf2p8affineqb_128:
6547 case Intrinsic::x86_vgf2p8affineqb_256:
6548 case Intrinsic::x86_vgf2p8affineqb_512:
6549 handleAVXGF2P8Affine(
I);
6559 bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &
I) {
6560 switch (
I.getIntrinsicID()) {
6561 case Intrinsic::aarch64_neon_rshrn:
6562 case Intrinsic::aarch64_neon_sqrshl:
6563 case Intrinsic::aarch64_neon_sqrshrn:
6564 case Intrinsic::aarch64_neon_sqrshrun:
6565 case Intrinsic::aarch64_neon_sqshl:
6566 case Intrinsic::aarch64_neon_sqshlu:
6567 case Intrinsic::aarch64_neon_sqshrn:
6568 case Intrinsic::aarch64_neon_sqshrun:
6569 case Intrinsic::aarch64_neon_srshl:
6570 case Intrinsic::aarch64_neon_sshl:
6571 case Intrinsic::aarch64_neon_uqrshl:
6572 case Intrinsic::aarch64_neon_uqrshrn:
6573 case Intrinsic::aarch64_neon_uqshl:
6574 case Intrinsic::aarch64_neon_uqshrn:
6575 case Intrinsic::aarch64_neon_urshl:
6576 case Intrinsic::aarch64_neon_ushl:
6578 handleVectorShiftIntrinsic(
I,
false);
6583 case Intrinsic::aarch64_neon_fmaxp:
6584 case Intrinsic::aarch64_neon_fminp:
6586 case Intrinsic::aarch64_neon_fmaxnmp:
6587 case Intrinsic::aarch64_neon_fminnmp:
6589 case Intrinsic::aarch64_neon_smaxp:
6590 case Intrinsic::aarch64_neon_sminp:
6591 case Intrinsic::aarch64_neon_umaxp:
6592 case Intrinsic::aarch64_neon_uminp:
6594 case Intrinsic::aarch64_neon_addp:
6596 case Intrinsic::aarch64_neon_faddp:
6598 case Intrinsic::aarch64_neon_saddlp:
6599 case Intrinsic::aarch64_neon_uaddlp: {
6600 handlePairwiseShadowOrIntrinsic(
I, 1);
6605 case Intrinsic::aarch64_neon_fcvtas:
6606 case Intrinsic::aarch64_neon_fcvtau:
6608 case Intrinsic::aarch64_neon_fcvtms:
6609 case Intrinsic::aarch64_neon_fcvtmu:
6611 case Intrinsic::aarch64_neon_fcvtns:
6612 case Intrinsic::aarch64_neon_fcvtnu:
6614 case Intrinsic::aarch64_neon_fcvtps:
6615 case Intrinsic::aarch64_neon_fcvtpu:
6617 case Intrinsic::aarch64_neon_fcvtzs:
6618 case Intrinsic::aarch64_neon_fcvtzu:
6620 case Intrinsic::aarch64_neon_fcvtxn: {
6621 handleNEONVectorConvertIntrinsic(
I);
6626 case Intrinsic::aarch64_neon_faddv:
6627 case Intrinsic::aarch64_neon_saddv:
6628 case Intrinsic::aarch64_neon_uaddv:
6631 case Intrinsic::aarch64_neon_smaxv:
6632 case Intrinsic::aarch64_neon_sminv:
6633 case Intrinsic::aarch64_neon_umaxv:
6634 case Intrinsic::aarch64_neon_uminv:
6638 case Intrinsic::aarch64_neon_fmaxv:
6639 case Intrinsic::aarch64_neon_fminv:
6640 case Intrinsic::aarch64_neon_fmaxnmv:
6641 case Intrinsic::aarch64_neon_fminnmv:
6643 case Intrinsic::aarch64_neon_saddlv:
6644 case Intrinsic::aarch64_neon_uaddlv:
6645 handleVectorReduceIntrinsic(
I,
true);
6648 case Intrinsic::aarch64_neon_ld1x2:
6649 case Intrinsic::aarch64_neon_ld1x3:
6650 case Intrinsic::aarch64_neon_ld1x4:
6651 case Intrinsic::aarch64_neon_ld2:
6652 case Intrinsic::aarch64_neon_ld3:
6653 case Intrinsic::aarch64_neon_ld4:
6654 case Intrinsic::aarch64_neon_ld2r:
6655 case Intrinsic::aarch64_neon_ld3r:
6656 case Intrinsic::aarch64_neon_ld4r: {
6657 handleNEONVectorLoad(
I,
false);
6661 case Intrinsic::aarch64_neon_ld2lane:
6662 case Intrinsic::aarch64_neon_ld3lane:
6663 case Intrinsic::aarch64_neon_ld4lane: {
6664 handleNEONVectorLoad(
I,
true);
6669 case Intrinsic::aarch64_neon_sqxtn:
6670 case Intrinsic::aarch64_neon_sqxtun:
6671 case Intrinsic::aarch64_neon_uqxtn:
6678 case Intrinsic::aarch64_neon_st1x2:
6679 case Intrinsic::aarch64_neon_st1x3:
6680 case Intrinsic::aarch64_neon_st1x4:
6681 case Intrinsic::aarch64_neon_st2:
6682 case Intrinsic::aarch64_neon_st3:
6683 case Intrinsic::aarch64_neon_st4: {
6684 handleNEONVectorStoreIntrinsic(
I,
false);
6688 case Intrinsic::aarch64_neon_st2lane:
6689 case Intrinsic::aarch64_neon_st3lane:
6690 case Intrinsic::aarch64_neon_st4lane: {
6691 handleNEONVectorStoreIntrinsic(
I,
true);
6704 case Intrinsic::aarch64_neon_tbl1:
6705 case Intrinsic::aarch64_neon_tbl2:
6706 case Intrinsic::aarch64_neon_tbl3:
6707 case Intrinsic::aarch64_neon_tbl4:
6708 case Intrinsic::aarch64_neon_tbx1:
6709 case Intrinsic::aarch64_neon_tbx2:
6710 case Intrinsic::aarch64_neon_tbx3:
6711 case Intrinsic::aarch64_neon_tbx4: {
6713 handleIntrinsicByApplyingToShadow(
6714 I,
I.getIntrinsicID(),
6719 case Intrinsic::aarch64_neon_fmulx:
6720 case Intrinsic::aarch64_neon_pmul:
6721 case Intrinsic::aarch64_neon_pmull:
6722 case Intrinsic::aarch64_neon_smull:
6723 case Intrinsic::aarch64_neon_pmull64:
6724 case Intrinsic::aarch64_neon_umull: {
6725 handleNEONVectorMultiplyIntrinsic(
I);
6736 void visitIntrinsicInst(IntrinsicInst &
I) {
6737 if (maybeHandleCrossPlatformIntrinsic(
I))
6740 if (maybeHandleX86SIMDIntrinsic(
I))
6743 if (maybeHandleArmSIMDIntrinsic(
I))
6746 if (maybeHandleUnknownIntrinsic(
I))
6749 visitInstruction(
I);
6752 void visitLibAtomicLoad(CallBase &CB) {
6763 Value *NewOrdering =
6767 NextNodeIRBuilder NextIRB(&CB);
6768 Value *SrcShadowPtr, *SrcOriginPtr;
6769 std::tie(SrcShadowPtr, SrcOriginPtr) =
6770 getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
6772 Value *DstShadowPtr =
6773 getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
6777 NextIRB.CreateMemCpy(DstShadowPtr,
Align(1), SrcShadowPtr,
Align(1),
Size);
6778 if (MS.TrackOrigins) {
6779 Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
6781 Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
6782 NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
6786 void visitLibAtomicStore(CallBase &CB) {
6793 Value *NewOrdering =
6797 Value *DstShadowPtr =
6807 void visitCallBase(CallBase &CB) {
6815 visitAsmInstruction(CB);
6817 visitInstruction(CB);
6826 case LibFunc_atomic_load:
6828 llvm::errs() <<
"MSAN -- cannot instrument invoke of libatomic load."
6832 visitLibAtomicLoad(CB);
6834 case LibFunc_atomic_store:
6835 visitLibAtomicStore(CB);
6851 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
6855 Func->removeFnAttrs(
B);
6861 bool MayCheckCall = MS.EagerChecks;
6865 MayCheckCall &= !
Func->getName().starts_with(
"__sanitizer_unaligned_");
6868 unsigned ArgOffset = 0;
6871 if (!
A->getType()->isSized()) {
6872 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is not sized: " << CB <<
"\n");
6876 if (
A->getType()->isScalableTy()) {
6877 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is vscale: " << CB <<
"\n");
6879 insertCheckShadowOf(
A, &CB);
6884 const DataLayout &
DL =
F.getDataLayout();
6888 bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
6891 insertCheckShadowOf(
A, &CB);
6892 Size =
DL.getTypeAllocSize(
A->getType());
6898 Value *ArgShadow = getShadow(
A);
6899 Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
6901 <<
" Shadow: " << *ArgShadow <<
"\n");
6905 assert(
A->getType()->isPointerTy() &&
6906 "ByVal argument is not a pointer!");
6911 MaybeAlign Alignment = std::nullopt;
6914 Value *AShadowPtr, *AOriginPtr;
6915 std::tie(AShadowPtr, AOriginPtr) =
6916 getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(), Alignment,
6918 if (!PropagateShadow) {
6925 if (MS.TrackOrigins) {
6926 Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
6940 Size =
DL.getTypeAllocSize(
A->getType());
6946 if (MS.TrackOrigins && !(Cst && Cst->
isNullValue())) {
6948 getOriginPtrForArgument(IRB, ArgOffset));
6951 assert(Store !=
nullptr);
6960 if (FT->isVarArg()) {
6961 VAHelper->visitCallBase(CB, IRB);
6971 if (MayCheckCall && CB.
hasRetAttr(Attribute::NoUndef)) {
6972 setShadow(&CB, getCleanShadow(&CB));
6973 setOrigin(&CB, getCleanOrigin());
6979 Value *
Base = getShadowPtrForRetval(IRBBefore);
6980 IRBBefore.CreateAlignedStore(getCleanShadow(&CB),
Base,
6992 setShadow(&CB, getCleanShadow(&CB));
6993 setOrigin(&CB, getCleanOrigin());
7000 "Could not find insertion point for retval shadow load");
7003 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
7006 setShadow(&CB, RetvalShadow);
7007 if (MS.TrackOrigins)
7008 setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
7013 RetVal =
I->getOperand(0);
7016 return I->isMustTailCall();
7021 void visitReturnInst(ReturnInst &
I) {
7023 Value *RetVal =
I.getReturnValue();
7029 Value *ShadowPtr = getShadowPtrForRetval(IRB);
7030 bool HasNoUndef =
F.hasRetAttribute(Attribute::NoUndef);
7031 bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
7034 bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (
F.getName() ==
"main");
7036 Value *Shadow = getShadow(RetVal);
7037 bool StoreOrigin =
true;
7039 insertCheckShadowOf(RetVal, &
I);
7040 Shadow = getCleanShadow(RetVal);
7041 StoreOrigin =
false;
7048 if (MS.TrackOrigins && StoreOrigin)
7049 IRB.
CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
7053 void visitPHINode(PHINode &
I) {
7055 if (!PropagateShadow) {
7056 setShadow(&
I, getCleanShadow(&
I));
7057 setOrigin(&
I, getCleanOrigin());
7061 ShadowPHINodes.push_back(&
I);
7062 setShadow(&
I, IRB.
CreatePHI(getShadowTy(&
I),
I.getNumIncomingValues(),
7064 if (MS.TrackOrigins)
7066 &
I, IRB.
CreatePHI(MS.OriginTy,
I.getNumIncomingValues(),
"_msphi_o"));
7069 Value *getLocalVarIdptr(AllocaInst &
I) {
7070 ConstantInt *IntConst =
7071 ConstantInt::get(Type::getInt32Ty((*
F.getParent()).getContext()), 0);
7072 return new GlobalVariable(*
F.getParent(), IntConst->
getType(),
7077 Value *getLocalVarDescription(AllocaInst &
I) {
7083 IRB.
CreateCall(MS.MsanPoisonStackFn, {&I, Len});
7085 Value *ShadowBase, *OriginBase;
7086 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
7090 IRB.
CreateMemSet(ShadowBase, PoisonValue, Len,
I.getAlign());
7093 if (PoisonStack && MS.TrackOrigins) {
7094 Value *Idptr = getLocalVarIdptr(
I);
7096 Value *Descr = getLocalVarDescription(
I);
7097 IRB.
CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
7098 {&I, Len, Idptr, Descr});
7100 IRB.
CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
7106 Value *Descr = getLocalVarDescription(
I);
7108 IRB.
CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
7110 IRB.
CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
7114 void instrumentAlloca(AllocaInst &
I, Instruction *InsPoint =
nullptr) {
7117 NextNodeIRBuilder IRB(InsPoint);
7118 const DataLayout &
DL =
F.getDataLayout();
7119 TypeSize TS =
DL.getTypeAllocSize(
I.getAllocatedType());
7121 if (
I.isArrayAllocation())
7125 if (MS.CompileKernel)
7126 poisonAllocaKmsan(
I, IRB, Len);
7128 poisonAllocaUserspace(
I, IRB, Len);
7131 void visitAllocaInst(AllocaInst &
I) {
7132 setShadow(&
I, getCleanShadow(&
I));
7133 setOrigin(&
I, getCleanOrigin());
7139 void visitSelectInst(SelectInst &
I) {
7145 handleSelectLikeInst(
I,
B,
C,
D);
7151 Value *Sb = getShadow(
B);
7152 Value *Sc = getShadow(
C);
7153 Value *Sd = getShadow(
D);
7155 Value *Ob = MS.TrackOrigins ? getOrigin(
B) : nullptr;
7156 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
7157 Value *Od = MS.TrackOrigins ? getOrigin(
D) : nullptr;
7162 if (
I.getType()->isAggregateType()) {
7166 Sa1 = getPoisonedShadow(getShadowTy(
I.getType()));
7167 }
else if (isScalableNonVectorType(
I.getType())) {
7175 Sa1 = getCleanShadow(getShadowTy(
I.getType()));
7183 C = CreateAppToShadowCast(IRB,
C);
7184 D = CreateAppToShadowCast(IRB,
D);
7191 if (MS.TrackOrigins) {
7194 if (
B->getType()->isVectorTy()) {
7195 B = convertToBool(
B, IRB);
7196 Sb = convertToBool(Sb, IRB);
7204 void visitLandingPadInst(LandingPadInst &
I) {
7207 setShadow(&
I, getCleanShadow(&
I));
7208 setOrigin(&
I, getCleanOrigin());
7211 void visitCatchSwitchInst(CatchSwitchInst &
I) {
7212 setShadow(&
I, getCleanShadow(&
I));
7213 setOrigin(&
I, getCleanOrigin());
7216 void visitFuncletPadInst(FuncletPadInst &
I) {
7217 setShadow(&
I, getCleanShadow(&
I));
7218 setOrigin(&
I, getCleanOrigin());
7221 void visitGetElementPtrInst(GetElementPtrInst &
I) { handleShadowOr(
I); }
7223 void visitExtractValueInst(ExtractValueInst &
I) {
7225 Value *Agg =
I.getAggregateOperand();
7227 Value *AggShadow = getShadow(Agg);
7231 setShadow(&
I, ResShadow);
7232 setOriginForNaryOp(
I);
7235 void visitInsertValueInst(InsertValueInst &
I) {
7238 Value *AggShadow = getShadow(
I.getAggregateOperand());
7239 Value *InsShadow = getShadow(
I.getInsertedValueOperand());
7245 setOriginForNaryOp(
I);
7248 void dumpInst(Instruction &
I) {
7252 errs() <<
"ZZZ " <<
I.getOpcodeName() <<
"\n";
7254 errs() <<
"QQQ " <<
I <<
"\n";
7257 void visitResumeInst(ResumeInst &
I) {
7262 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
7267 void visitCatchReturnInst(CatchReturnInst &CRI) {
7272 void instrumentAsmArgument(
Value *Operand,
Type *ElemTy, Instruction &
I,
7281 insertCheckShadowOf(Operand, &
I);
7288 auto Size =
DL.getTypeStoreSize(ElemTy);
7290 if (MS.CompileKernel) {
7291 IRB.
CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
7297 auto [ShadowPtr,
_] =
7298 getShadowOriginPtrUserspace(Operand, IRB, IRB.
getInt8Ty(),
Align(1));
7308 int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
7309 int NumRetOutputs = 0;
7316 NumRetOutputs =
ST->getNumElements();
7321 for (
const InlineAsm::ConstraintInfo &
Info : Constraints) {
7322 switch (
Info.Type) {
7330 return NumOutputs - NumRetOutputs;
7333 void visitAsmInstruction(Instruction &
I) {
7349 const DataLayout &
DL =
F.getDataLayout();
7353 int OutputArgs = getNumOutputArgs(IA, CB);
7359 for (
int i = OutputArgs; i < NumOperands; i++) {
7367 for (
int i = 0; i < OutputArgs; i++) {
7373 setShadow(&
I, getCleanShadow(&
I));
7374 setOrigin(&
I, getCleanOrigin());
7377 void visitFreezeInst(FreezeInst &
I) {
7379 setShadow(&
I, getCleanShadow(&
I));
7380 setOrigin(&
I, getCleanOrigin());
7383 void visitInstruction(Instruction &
I) {
7388 for (
size_t i = 0, n =
I.getNumOperands(); i < n; i++) {
7389 Value *Operand =
I.getOperand(i);
7391 insertCheckShadowOf(Operand, &
I);
7393 setShadow(&
I, getCleanShadow(&
I));
7394 setOrigin(&
I, getCleanOrigin());
7398struct VarArgHelperBase :
public VarArgHelper {
7400 MemorySanitizer &MS;
7401 MemorySanitizerVisitor &MSV;
7403 const unsigned VAListTagSize;
7405 VarArgHelperBase(Function &
F, MemorySanitizer &MS,
7406 MemorySanitizerVisitor &MSV,
unsigned VAListTagSize)
7407 :
F(
F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
7411 return IRB.
CreateAdd(
Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
7417 MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset),
"_msarg_va_s");
7426 return getShadowPtrForVAArgument(IRB, ArgOffset);
7435 ConstantInt::get(MS.IntptrTy, ArgOffset),
7440 unsigned BaseOffset) {
7449 TailSize,
Align(8));
7452 void unpoisonVAListTagForInst(IntrinsicInst &
I) {
7454 Value *VAListTag =
I.getArgOperand(0);
7456 auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
7457 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
7460 VAListTagSize, Alignment,
false);
7463 void visitVAStartInst(VAStartInst &
I)
override {
7464 if (
F.getCallingConv() == CallingConv::Win64)
7467 unpoisonVAListTagForInst(
I);
7470 void visitVACopyInst(VACopyInst &
I)
override {
7471 if (
F.getCallingConv() == CallingConv::Win64)
7473 unpoisonVAListTagForInst(
I);
7478struct VarArgAMD64Helper :
public VarArgHelperBase {
7481 static const unsigned AMD64GpEndOffset = 48;
7482 static const unsigned AMD64FpEndOffsetSSE = 176;
7484 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
7486 unsigned AMD64FpEndOffset;
7487 AllocaInst *VAArgTLSCopy =
nullptr;
7488 AllocaInst *VAArgTLSOriginCopy =
nullptr;
7489 Value *VAArgOverflowSize =
nullptr;
7491 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
7493 VarArgAMD64Helper(Function &
F, MemorySanitizer &MS,
7494 MemorySanitizerVisitor &MSV)
7495 : VarArgHelperBase(
F, MS, MSV, 24) {
7496 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
7497 for (
const auto &Attr :
F.getAttributes().getFnAttrs()) {
7498 if (Attr.isStringAttribute() &&
7499 (Attr.getKindAsString() ==
"target-features")) {
7500 if (Attr.getValueAsString().contains(
"-sse"))
7501 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
7507 ArgKind classifyArgument(
Value *arg) {
7510 if (
T->isX86_FP80Ty())
7512 if (
T->isFPOrFPVectorTy())
7513 return AK_FloatingPoint;
7514 if (
T->isIntegerTy() &&
T->getPrimitiveSizeInBits() <= 64)
7515 return AK_GeneralPurpose;
7516 if (
T->isPointerTy())
7517 return AK_GeneralPurpose;
7529 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7530 unsigned GpOffset = 0;
7531 unsigned FpOffset = AMD64GpEndOffset;
7532 unsigned OverflowOffset = AMD64FpEndOffset;
7533 const DataLayout &
DL =
F.getDataLayout();
7537 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
7544 assert(
A->getType()->isPointerTy());
7546 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
7547 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7548 unsigned BaseOffset = OverflowOffset;
7549 Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7550 Value *OriginBase =
nullptr;
7551 if (MS.TrackOrigins)
7552 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7553 OverflowOffset += AlignedSize;
7556 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7560 Value *ShadowPtr, *OriginPtr;
7561 std::tie(ShadowPtr, OriginPtr) =
7566 if (MS.TrackOrigins)
7570 ArgKind AK = classifyArgument(
A);
7571 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
7573 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
7575 Value *ShadowBase, *OriginBase =
nullptr;
7577 case AK_GeneralPurpose:
7578 ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
7579 if (MS.TrackOrigins)
7580 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
7584 case AK_FloatingPoint:
7585 ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
7586 if (MS.TrackOrigins)
7587 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
7594 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
7595 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7596 unsigned BaseOffset = OverflowOffset;
7597 ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7598 if (MS.TrackOrigins) {
7599 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7601 OverflowOffset += AlignedSize;
7604 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7613 Value *Shadow = MSV.getShadow(
A);
7615 if (MS.TrackOrigins) {
7616 Value *Origin = MSV.getOrigin(
A);
7617 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
7618 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
7624 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
7625 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        // ...
      }
    }
    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr,
                         Alignment, VAArgOverflowSize);
      }
    }
  }
};
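/// AArch64-specific implementation of VarArgHelper. The AAPCS64 saves the
/// eight general-purpose argument registers (x0-x7, 64 bytes) and the eight
/// FP/SIMD argument registers (q0-q7, 128 bytes) into dedicated save areas,
/// mirrored below by the Gr and Vr sections of the va_arg TLS.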
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 32) {}
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};
    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }
    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }
    return {AK_Memory, 0};
  }
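  // Example: a vararg of type [4 x float] recurses into the element type,
  // which classifies as {AK_FloatingPoint, 1}, then scales by the array
  // length, yielding {AK_FloatingPoint, 4}: four 16-byte slots in the VR
  // section of the TLS (see VrOffset += 16 * RegNum below).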
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      // ...
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory: {
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // No space left in the parameter TLS to copy the shadow.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
        break;
      }
      }
      // ...
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 32-bit size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
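  // A sketch of the AAPCS64 va_list layout read by the two helpers above
  // (32 bytes, matching the VAListTagSize passed to the base class):
  //   struct va_list {
  //     void *__stack;   // offset 0: next stacked argument
  //     void *__gr_top;  // offset 8: end of the GP register save area
  //     void *__vr_top;  // offset 16: end of the FP/SIMD save area
  //     int __gr_offs;   // offset 24: negative offset from __gr_top
  //     int __vr_offs;   // offset 28: negative offset from __vr_top
  //   };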
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the entry block before calls can clobber it.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      // ...
      Value *StackSaveAreaPtr = IRB.CreateIntToPtr(
          getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // __gr_offs is negative; adding GrArgSize to it skips the shadow bytes
      // that belong to named (fixed) GP arguments.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, but for the FP/SIMD save area.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64GrEndOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);
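      // Worked example (hedged): for int f(int a, ...), va_start leaves
      // __gr_offs = -(8 - 1) * 8 = -56, so GrTop + __gr_offs points at the
      // save slot of the first *variadic* GP register. GrArgSize + __gr_offs
      // = 8 then skips the named argument's shadow in the TLS backup, and
      // GrCopySize = 64 - 8 = 56 bytes of variadic GP shadow are copied.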
      // Finally, copy the shadow for the overflow (stack) area.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 8) {}
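  // On PowerPC64 the va_list tag is a plain 8-byte pointer into the
  // parameter save area (hence the tag size of 8 above); there are no
  // separate register-save and overflow areas to model as on x86-64 or
  // AArch64.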
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // The parameter save area starts 48 bytes from the frame pointer for
    // ABIv1 and 32 bytes for ABIv2.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow of arguments narrower than 8 bytes to match
          // the placement of bits on a big-endian target.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          // ...
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as the total vararg size here, to avoid
    // introducing another TLS slot.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
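  // Example of the big-endian adjustment above: a 4-byte int stored in an
  // 8-byte parameter slot occupies the high-addressed half on big-endian
  // PPC64, so the shadow offset is bumped by 8 - 4 = 4 to stay aligned with
  // the actual data bytes.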
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 12) {}
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // ...
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Type *ArgTy = A->getType();
        // ...
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          // Arrays are aligned to the element size.
          Type *ElementTy = ArgTy->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow of arguments narrower than a register to match
          // the placement of bits on a big-endian target.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                                  ArgSize);
          // ...
        }
        VAArgOffset += ArgSize;
        // ...
      }
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as the total vararg size here.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
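  // A sketch of the SVR4 PPC32 va_list tag (12 bytes, matching the
  // VAListTagSize passed to the base class):
  //   struct __va_list_tag {
  //     unsigned char gpr;       // GP registers consumed
  //     unsigned char fpr;       // FP registers consumed
  //     unsigned short reserved;
  //     void *overflow_arg_area; // byte offset 4
  //     void *reg_save_area;     // byte offset 8
  //   };
  // finalizeInstrumentation() below reads the two pointers at +4 and +8.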
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      // The GPR portion of the register save area holds at most 32 bytes.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);
      // ... RegSaveAreaShadowPtr = ...
      // ... ConstantInt::get(MS.IntptrTy, 32));
      // ... ConstantInt::get(MS.IntptrTy, 32), Alignment);

      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(
            F.getFnAttribute("use-soft-float").getValueAsBool()) {}
  ArgKind classifyArgument(Type *T) {
    // Some i128 and fp128 arguments are converted to pointers only in the
    // back end.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // The ABI widens integer arguments narrower than 64 bits with sign or
    // zero extension; the shadow must be extended the same way to stay
    // meaningful.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt))
      return ShadowExtension::Zero;
    if (CB.paramHasAttr(ArgNo, Attribute::SExt))
      return ShadowExtension::Sign;
    return ShadowExtension::None;
  }
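  // Example: passing (short)-1 as a vararg on s390x materializes it as a
  // sign-extended 64-bit register value, so its 16-bit shadow must likewise
  // be sign-extended (ShadowExtension::Sign) before being stored into the
  // 8-byte TLS slot; without an ext attribute the shadow keeps its own size
  // and the remaining GapSize bytes of the slot are skipped.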
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        // ...
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (!IsFixed) {
          SE = getShadowExtension(CB, ArgNo);
          uint64_t GapSize = 0;
          if (SE == ShadowExtension::None) {
            uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
            assert(ArgAllocSize <= ArgSize);
            GapSize = ArgSize - ArgAllocSize;
          }
          ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
        }
        GpOffset += ArgSize;
        break;
      }
      case ArgKind::FloatingPoint: {
        uint64_t ArgSize = 8;
        if (!IsFixed) {
          // ...
          ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
        }
        FpOffset += ArgSize;
        break;
      }
      case ArgKind::Vector: {
        // ...
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
        uint64_t ArgSize = alignTo(ArgAllocSize, 8);
        if (!IsFixed) {
          SE = getShadowExtension(CB, ArgNo);
          uint64_t GapSize =
              SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
          ShadowBase =
              getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
          if (MS.TrackOrigins)
            OriginBase =
                getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
        }
        OverflowOffset += ArgSize;
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
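  // Note: named vector arguments are the one case above where a *fixed*
  // argument still changes vararg bookkeeping (VrIndex), because only the
  // first SystemZMaxVrArgs vector arguments travel in vector registers;
  // variadic vectors always go through the overflow area.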
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions it is enough to copy just the GPR shadow.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        // ...
      }
    }
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
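/// i386-specific implementation of VarArgHelper. On i386 every variadic
/// argument is passed on the stack, so the va_arg TLS is a single contiguous
/// region addressed from offset 0.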
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 4) {}
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow of arguments narrower than a register to match
          // the placement of bits on a big-endian target.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as the total vararg size here.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
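  // Example: for printf("%d %lld", I, LL) on i386, I occupies 4 bytes at TLS
  // offset 0 and LL 8 bytes at offset 4 (stack slots are only IntptrSize
  // aligned), so one contiguous shadow copy in finalizeInstrumentation()
  // covers both.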
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjust the shadow of arguments narrower than a register to match
        // the placement of bits on a big-endian target.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as the total vararg size here.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
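  // Note: the targets served by this helper pass all variadic arguments in
  // one contiguous area addressed through the va_list pointer, so a single
  // running VAArgOffset fully describes the shadow layout.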
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
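// All four aliases share VarArgGenericHelper: on these targets the va_list
// tag is a single pointer and the varargs area is contiguous; the only
// per-target difference is the tag size passed by CreateVarArgHelper() below
// (e.g. 4 bytes for riscv32/mips32, 8 for riscv64/mips64).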
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  // ...
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);
  // ...
  return Visitor.runOnFunction();
}