#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"
// ...

#define DEBUG_TYPE "msan"

DEBUG_COUNTER(DebugInsertCheck, "msan-insert-check",
              "Controls which checks to insert");

DEBUG_COUNTER(DebugInstrumentInstruction, "msan-instrument-instruction",
              "Controls which instruction to instrument");
241 "msan-track-origins",
246 cl::desc(
"keep going after reporting a UMR"),
255 "msan-poison-stack-with-call",
260 "msan-poison-stack-pattern",
261 cl::desc(
"poison uninitialized stack variables with the given pattern"),
266 cl::desc(
"Print name of local stack variable"),
271 cl::desc(
"Poison fully undef temporary values. "
272 "Partially undefined constant vectors "
273 "are unaffected by this flag (see "
274 "-msan-poison-undef-vectors)."),
278 "msan-poison-undef-vectors",
279 cl::desc(
"Precisely poison partially undefined constant vectors. "
280 "If false (legacy behavior), the entire vector is "
281 "considered fully initialized, which may lead to false "
282 "negatives. Fully undefined constant vectors are "
283 "unaffected by this flag (see -msan-poison-undef)."),
287 "msan-precise-disjoint-or",
288 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
289 "disjointedness is ignored (i.e., 1|1 is initialized)."),
static cl::opt<bool>
    ClHandleICmp("msan-handle-icmp",
                 cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
                 cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClHandleICmpExact("msan-handle-icmp-exact",
                      cl::desc("exact handling of relational integer ICmp"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleLifetimeIntrinsics(
    "msan-handle-lifetime-intrinsics",
    cl::desc(
        "when possible, poison scoped variables at the beginning of the scope "
        "(slower, but more precise)"),
    cl::Hidden, cl::init(true));
// ...
static cl::opt<bool> ClHandleAsmConservative(
    "msan-handle-asm-conservative",
    cl::desc("conservative handling of inline assembly"), cl::Hidden,
    cl::init(true));
// ...
static cl::opt<bool> ClCheckAccessAddress(
    "msan-check-access-address",
    cl::desc("report accesses through a pointer which has poisoned shadow"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClEagerChecks(
    "msan-eager-checks",
    cl::desc("check arguments and return values at function call boundaries"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDumpStrictInstructions(
    "msan-dump-strict-instructions",
    cl::desc("print out instructions with default strict semantics, i.e., "
             "check that all the inputs are fully initialized, and mark "
             "the output as fully initialized. These semantics are applied "
             "to instructions that could not be handled explicitly nor "
             "heuristically."),
    cl::Hidden, cl::init(false));
351 "msan-dump-heuristic-instructions",
352 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
353 "Use -msan-dump-strict-instructions to print instructions that "
354 "could not be handled explicitly nor heuristically."),
358 "msan-instrumentation-with-call-threshold",
360 "If the function being instrumented requires more than "
361 "this number of checks and origin stores, use callbacks instead of "
362 "inline checks (-1 means never use callbacks)."),
static cl::opt<bool>
    ClEnableKmsan("msan-kernel",
                  cl::desc("Enable KernelMemorySanitizer instrumentation"),
                  cl::Hidden, cl::init(false));
// ...
static cl::opt<bool>
    ClCheckConstantShadow("msan-check-constant-shadow",
                          cl::desc("Insert checks for constant shadow values"),
                          cl::Hidden, cl::init(true));
// ...
static cl::opt<bool>
    ClWithComdat("msan-with-comdat",
                 cl::desc("Place MSan constructors in comdat sections"),
                 cl::Hidden, cl::init(false));

// These options allow specifying custom memory map parameters.
// See MemoryMapParams for details.
static cl::opt<uint64_t> ClAndMask("msan-and-mask",
                                   cl::desc("Define custom MSan AndMask"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
                                   cl::desc("Define custom MSan XorMask"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
                                      cl::desc("Define custom MSan ShadowBase"),
                                      cl::Hidden, cl::init(0));

static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
                                      cl::desc("Define custom MSan OriginBase"),
                                      cl::Hidden, cl::init(0));

static cl::opt<int>
    ClDisambiguateWarning("msan-disambiguate-warning-threshold",
                          cl::desc("Define threshold for number of checks per "
                                   "debug location to force origin update."),
                          cl::Hidden, cl::init(3));
// Memory map parameters used in application-to-shadow address calculation.
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
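// How these parameters are applied (see getShadowPtrOffset and
// getShadowOriginPtrUserspace below):
//   Shadow = ((App & ~AndMask) ^ XorMask) + ShadowBase
//   Origin = ((App & ~AndMask) ^ XorMask) + OriginBase, 4-byte aligned
// With the common Linux x86_64 parameters (AndMask = 0, ShadowBase = 0),
// the shadow of address A is just A ^ XorMask -- a single XOR per access.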
class MemorySanitizer {
public:
  // ...
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

private:
  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args);
  // ...
  Value *ParamOriginTLS;
  // ...
  Value *RetvalOriginTLS;
  // ...
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;
  // ...
  FunctionCallee MaybeWarningVarSizeFn;
  // ...
  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
  // ...
  FunctionCallee MsanPoisonStackFn;
  // ...
  FunctionCallee MsanChainOriginFn;
  // ...
  FunctionCallee MsanSetOriginFn;
  // ...
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
  // ...
  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;
  // ...
  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
  // ...
  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;
  // ...
  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;
  // ...
  MemoryMapParams CustomMapParams;

  MDNode *ColdCallWeights;
  // ...
  MDNode *OriginStoreWeights;
};
void insertModuleCtor(Module &M) {
  // ...
}
// ...
PreservedAnalyses MemorySanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  // ...
  if (!Options.Kernel) {
    // ...
  }
  // ...
    MemorySanitizer Msan(*F.getParent(), Options);
  // ...
}

void MemorySanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<MemorySanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  // ...
  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;
  // ...
}
template <typename... ArgsTy>
FunctionCallee
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args) {
  if (TargetTriple.getArch() == Triple::systemz) {
    // SystemZ ABI: the shadow/origin pair is returned via a hidden parameter.
    return M.getOrInsertFunction(Name, Type::getVoidTy(*C), PtrTy,
                                 std::forward<ArgsTy>(Args)...);
  }

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
}
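// In KMSAN mode these helpers return a {shadow, origin} pointer pair.
// A minimal sketch of the expected IR call sequence (assumed, for the
// non-SystemZ struct-returning variant):
//   %pair = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr %p)
//   %shadow_ptr = extractvalue { ptr, ptr } %pair, 0
//   %origin_ptr = extractvalue { ptr, ptr } %pair, 1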
  RetvalOriginTLS = nullptr;
  // ...
  ParamOriginTLS = nullptr;
  // ...
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    TLI.getAttrList(C, {0}, /*Signext=*/true),
                                    IRB.getVoidTy(), IRB.getInt32Ty());
  // ...
  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);
  // ...
  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
  }

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);

  // Functions for poisoning and unpoisoning memory.
  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
}

static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
  return M.getOrInsertGlobal(Name, Ty, [&] {
    return new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                              nullptr, Name, nullptr,
                              GlobalVariable::InitialExecTLSModel);
  });
}
void MemorySanitizer::createUserspaceApi(Module &M,
                                         const TargetLibraryInfo &TLI) {
  IRBuilder<> IRB(*C);

  // Create the callback.
  if (TrackOrigins) {
    StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                      : "__msan_warning_with_origin_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName,
                                      TLI.getAttrList(C, {0}, /*Signext=*/true),
                                      IRB.getVoidTy(), IRB.getInt32Ty());
  } else {
    StringRef WarningFnName =
        Recover ? "__msan_warning" : "__msan_warning_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
  }

  // Create the global TLS variables.
  // ...
  VAArgOverflowSizeTLS = getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls",
                                           IRB.getIntPtrTy(M.getDataLayout()));

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, TLI.getAttrList(C, {0, 1}, /*Signext=*/true),
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, /*Signext=*/false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, TLI.getAttrList(C, {0, 2}, /*Signext=*/true),
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
        IRB.getInt32Ty());
  }

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
}
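// For reference, a hedged sketch of the runtime side of the "maybe" helpers
// (mirroring compiler-rt semantics; not defined in this file):
//   void __msan_maybe_warning_4(uint32_t s, uint32_t o) {
//     if (s) __msan_warning_with_origin(o); // or the noreturn variant
//   }
// Emitting one callback instead of an inline compare+branch per check is what
// -msan-instrumentation-with-call-threshold trades speed against code size.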
void MemorySanitizer::initializeCallbacks(Module &M,
                                          const TargetLibraryInfo &TLI) {
  // Only do this once.
  if (CallbacksInitialized)
    return;

  IRBuilder<> IRB(*C);
  // ...
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", /* ... */);
  MsanSetOriginFn = M.getOrInsertFunction(
      "__msan_set_origin", /* ... */
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
  MemmoveFn =
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset", /* ... */
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

  if (CompileKernel) {
    createKernelApi(M, TLI);
  } else {
    createUserspaceApi(M, TLI);
  }
  CallbacksInitialized = true;
}

FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
                                                             int size) {
  FunctionCallee *Fns =
      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
  // ...
}
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();
  // ...
  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  // Check the overrides first.
  if (ShadowPassed || OriginPassed) {
    // ...
    MapParams = &CustomMapParams;
  } else {
    // Pick the per-platform mapping table.
    switch (TargetTriple.getOS()) {
    case Triple::FreeBSD:
      switch (TargetTriple.getArch()) {
      // ...
      }
      break;
    case Triple::NetBSD:
      switch (TargetTriple.getArch()) {
      // ...
      }
      break;
    case Triple::Linux:
      switch (TargetTriple.getArch()) {
      // ...
      }
      break;
    // ...
    }
  }
  // ...
  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();
  // ...
  if (!CompileKernel) {
    if (TrackOrigins)
      M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(
            M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
            IRB.getInt32(TrackOrigins), "__msan_track_origins");
      });

    if (Recover)
      M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                  GlobalValue::WeakODRLinkage,
                                  IRB.getInt32(Recover), "__msan_keep_going");
      });
  }
}
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  /// Visit a CallBase.
  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;

  /// Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// Finalize function instrumentation.
  virtual void finalizeInstrumentation() = 0;
};

struct MemorySanitizerVisitor;

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor);

static unsigned TypeSizeToSizeIndex(TypeSize TS) {
  if (TS.isScalable())
    // Scalable types unconditionally take slow paths.
    return kNumberOfAccessSizes;
  unsigned TypeSizeFixed = TS.getFixedValue();
  if (TypeSizeFixed <= 8)
    return 0;
  return Log2_32_Ceil((TypeSizeFixed + 7) / 8);
}
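// The index is computed from a size in *bits*: <= 8 bits maps to index 0
// (handled by __msan_maybe_warning_1), 16 bits to 1, 32 bits to 2, 64 bits
// to 3; larger or scalable sizes fall off the table and take slow paths.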
class NextNodeIRBuilder : public IRBuilder<> {
  // ...
};

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  // ...
  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;
  // ...
  bool PropagateShadow;
  // ...
  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;

    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  };
  // ...
  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallSetVector<AllocaInst *, 16> AllocaSet;
  // ...
  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)
      : /* ... */ {
    bool SanitizeFunction =
        F.hasFnAttribute(Attribute::SanitizeMemory) && !ClDisableChecks;
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    // ...
    MS.initializeCallbacks(*F.getParent(), TLI);
    FnPrologueEnd =
        IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
            .CreateIntrinsic(Intrinsic::donothing, {});

    if (MS.CompileKernel) {
      IRBuilder<> IRB(FnPrologueEnd);
      insertKmsanPrologue(IRB);
    }

    LLVM_DEBUG(if (!InsertChecks) dbgs()
               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");
  }
  bool instrumentWithCalls(Value *V) {
    // ...
    ++SplittableBlocksCount;
    // ...
  }

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&
           // ...
  }

  // Creates a new origin and records the stack trace.
  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1)
      return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    // ...
  }

  /// Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    // ...
    auto [InsertPt, Index] =
        // ...

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      // ...
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        // ...
      }
      CurrentAlignment = IntptrAlignment;
    }
    // ...
  }
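// Origins are tracked at 4-byte (kOriginSize) granularity. When the
// destination is sufficiently aligned, the 32-bit origin id is first
// splatted into an intptr-sized pattern (originToIntptr) so the loop above
// can paint 8 bytes per store on 64-bit targets.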
  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    // ...
    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
    // ...
    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
    // ...
      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
                  // ...
    // ...
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    // ...
    if (instrumentWithCalls(ConvertedShadow) &&
        // ...
      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =
          // ...
      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
      // ...
    } else {
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      // ...
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
                  // ...
    }
  }
  void materializeStores() {
    for (StoreInst *SI : StoreList) {
      IRBuilder<> IRB(SI);
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      Type *ShadowTy = Shadow->getType();
      const Align Alignment = SI->getAlign();
      // ...
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);

      [[maybe_unused]] StoreInst *NewSI =
          // ...
      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
                    // ...
    }
  }

  // Returns true if the given debug location corresponds to multiple warnings.
  bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
    if (MS.TrackOrigins < 2)
      return false;

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];
    // ...
  }

  /// Helper function to insert a warning at IRB's current insert point.
  void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
    // ...
    auto NewDebugLoc = OI->getDebugLoc();
    // ...
    IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
    Origin = updateOrigin(Origin, IRBOrigin);
    // ...
    if (MS.CompileKernel || MS.TrackOrigins)
      // ...
  }
  void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
                           Value *Origin) {
    const DataLayout &DL = F.getDataLayout();
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    // ...
    if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
      // ...
        ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
        Value *ConvertedShadow2 =
            // ...
        FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
        // ...
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
      // ...
        FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
        // ...
        unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());
        // ...
            {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
      // ...
    } else {
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      // ...
          !MS.Recover, MS.ColdCallWeights);
      // ...
      insertWarningFn(IRB, Origin);
      // ...
    }
  }
  void materializeInstructionChecks(
      ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
    const DataLayout &DL = F.getDataLayout();
    // ...
    bool Combine = !MS.TrackOrigins;
    // ...
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);
      // ...
      Value *ConvertedShadow = ShadowData.Shadow;
      // ...
          insertWarningFn(IRB, ShadowData.Origin);
      // ...
      if (!Combine) {
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        continue;
      }
      // ...
      if (!Shadow) {
        Shadow = ConvertedShadow;
        continue;
      }
      // ...
      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
    }

    if (Shadow) {
      // ...
      materializeOneCheck(IRB, Shadow, nullptr);
    }
  }
  static bool isAArch64SVCount(Type *Ty) {
    if (auto *TTy = dyn_cast<TargetExtType>(Ty))
      return TTy->getName() == "aarch64.svcount";
    return false;
  }

  static bool isScalableNonVectorType(Type *Ty) {
    if (!isAArch64SVCount(Ty))
      LLVM_DEBUG(dbgs() << "isScalableNonVectorType: Unexpected type " << *Ty
                        << "\n");
    // ...
  }
  void materializeChecks() {
    // ...
    SmallPtrSet<Instruction *, 16> Done;
    // ...
    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      // Checks are grouped by the original instruction.
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;
                            });
      // ...
    }
  }
  void insertKmsanPrologue(IRBuilder<> &IRB) {
    Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
    Constant *Zero = IRB.getInt32(0);
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");
    // ...
      MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
  }
    // ...
    for (Instruction *I : Instructions)
      InstVisitor<MemorySanitizerVisitor>::visit(*I);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();
    // ...
    for (auto Item : LifetimeStartList) {
      instrumentAlloca(*Item.second, Item.first);
      AllocaSet.remove(Item.second);
    }
    // ...
    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);
    // ...
    materializeChecks();
    // ...
    materializeStores();
    // ...
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

  /// Returns the type of the shadow for the given original type.
  Type *getShadowTy(Type *OrigTy) {
    // ...
    const DataLayout &DL = F.getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getElementCount());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type *, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    if (isScalableNonVectorType(OrigTy)) {
      LLVM_DEBUG(dbgs() << "getShadowTy: Scalable non-vector type: " << *OrigTy
                        << "\n");
      // ...
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  Value *collapseStructShadow(StructType *Struct, Value *Shadow,
                              IRBuilder<> &IRB) {
    // ...
    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
      // ...
      Value *ShadowBool = convertToBool(ShadowItem, IRB);

      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
      else
        Aggregator = ShadowBool;
    }
    // ...
  }

  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
                             IRBuilder<> &IRB) {
    if (!Array->getNumElements())
      // ...
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);

    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
      // ...
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
    }
    // ...
  }

  /// Convert a shadow value of arbitrary type to a scalar.
  Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
    if (StructType *Struct = dyn_cast<StructType>(V->getType()))
      return collapseStructShadow(Struct, V, IRB);
    if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
      return collapseArrayShadow(Array, V, IRB);
    // ...
      unsigned BitWidth =
          V->getType()->getPrimitiveSizeInBits().getFixedValue();
    // ...
  }

  /// Convert a shadow value to a bool, comparing with zero if needed.
  Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
    Type *VTy = V->getType();
    // ...
    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  }
  Type *ptrToIntPtrType(Type *PtrTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy))
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());
    // ...
  }

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy))
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    assert(IntPtrTy == MS.IntptrTy);
    // ...
  }

  Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy))
      return ConstantVector::getSplat(
          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C, false, /* ... */);
  }

  /// Compute the integer shadow offset that corresponds to a given
  /// application address: Offset = (Addr & ~AndMask) ^ XorMask.
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));

    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
    return OffsetLong;
  }
  /// Compute the shadow and origin addresses corresponding to a given
  /// application address (userspace mapping).
  std::pair<Value *, Value *>
  getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
                              MaybeAlign Alignment) {
    VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
    if (VectTy)
      assert(VectTy->getElementType()->isPointerTy());
    // ...
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
      ShadowLong =
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
    }
    Value *ShadowPtr = IRB.CreateIntToPtr(
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
        OriginLong =
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
      if (!Alignment || *Alignment < kMinOriginAlignment) {
        uint64_t Mask = kMinOriginAlignment.value() - 1;
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
      }
      OriginPtr = IRB.CreateIntToPtr(
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    }
    return std::make_pair(ShadowPtr, OriginPtr);
  }

  template <typename... ArgsTy>
  Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
                            ArgsTy... Args) {
    if (MS.TargetTriple.getArch() == Triple::systemz) {
      IRB.CreateCall(Callee,
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
    }

    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  }
  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
                                                            IRBuilder<> &IRB,
                                                            Type *ShadowTy,
                                                            bool isStore) {
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);

    FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
    Value *AddrCast = IRB.CreatePointerCast(Addr, MS.PtrTy);
    if (Getter) {
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
    } else {
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          IRB,
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
          AddrCast, SizeVal);
    }
    Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
    // ...
    Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
    // ...
    return std::make_pair(ShadowPtr, OriginPtr);
  }

  /// The kernel mapping has no vector support, so vectors of addresses are
  /// handled one element at a time.
  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
                                                       IRBuilder<> &IRB,
                                                       Type *ShadowTy,
                                                       bool isStore) {
    VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
    if (!VectTy)
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    // ...
    Value *ShadowPtrs = ConstantInt::getNullValue(
        FixedVectorType::get(MS.PtrTy, NumElements));
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
          FixedVectorType::get(MS.PtrTy, NumElements));
    for (unsigned i = 0; i < NumElements; ++i) {
      Value *OneAddr =
          IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);

      ShadowPtrs = IRB.CreateInsertElement(
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
        OriginPtrs = IRB.CreateInsertElement(
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    }
    return {ShadowPtrs, OriginPtrs};
  }

  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 Type *ShadowTy,
                                                 MaybeAlign Alignment,
                                                 bool isStore) {
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
  }
  /// Compute the shadow address for a given function argument.
  Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
    // ...
                         ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");
  }

  /// Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
    if (!MS.TrackOrigins)
      return nullptr;
    // ...
                         ConstantInt::get(MS.IntptrTy, ArgOffset),
                         "_msarg_o");
  }

  /// Compute the origin address for the return value.
  Value *getOriginPtrForRetval() {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins)
      return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }
  Constant *getCleanShadow(Type *OrigTy) {
    Type *ShadowTy = getShadowTy(OrigTy);
    // ...
    return Constant::getNullValue(ShadowTy);
  }

  /// Create a clean shadow value for a given value.
  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

  /// Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    // ...
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    // ...
  }

  /// Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    // ...
    return getPoisonedShadow(ShadowTy);
  }
  /// Get the shadow value for a given Value.
  ///
  /// Either returns the value set earlier with setShadow, or extracts it
  /// from ParamTLS for function arguments.
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanShadow(V);
      // ...
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);
      // ...
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value *&ShadowPtr = ShadowMap[V];
      if (ShadowPtr)
        return ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(FnPrologueEnd);
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
          LLVM_DEBUG(dbgs() << (FArg.getType()->isScalableTy()
                                    ? "vscale not fully supported\n"
                                    : "Arg is not sized\n"));
          if (A == &FArg) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            break;
          }
          continue;
        }

        unsigned Size = FArg.hasByValAttr()
                            ? DL.getTypeAllocSize(FArg.getParamByValType())
                            : DL.getTypeAllocSize(FArg.getType());

        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // ...
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
                                   /*isStore*/ true);
            if (!PropagateShadow || Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  // ...
            } else {
              // ...
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                  CpShadowPtr, CopyAlign, Base, CopyAlign, Size);
              // ...
              if (MS.TrackOrigins) {
                Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
                // ...
                EntryIRB.CreateMemCpy(
                    // ...
              }
            }
          }

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
          } else {
            // Shadow over TLS.
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
                                                   kShadowTLSAlignment);
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
            }
          }
          LLVM_DEBUG(dbgs()
                     << "  ARG: " << FArg << " ==> " << *ShadowPtr << "\n");
          break;
        }

        ArgOffset += alignTo(Size, kShadowTLSAlignment);
      }
      assert(ShadowPtr && "Could not find shadow for an argument");
      return ShadowPtr;
    }
    if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V) &&
        cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
        PoisonUndefVectors) {
      // Build the shadow of a partially undefined constant vector
      // element by element.
      // ...
      for (unsigned i = 0; i != NumElems; ++i) {
        // ...
        ShadowVector[i] = isa<UndefValue>(Elem) ? getPoisonedShadow(Elem)
                                                : getCleanShadow(Elem);
      }

      Value *ShadowConstant = ConstantVector::get(ShadowVector);
      LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
                        << *ShadowConstant << "\n");

      return ShadowConstant;
    }
    // ...
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// Get the shadow for the i-th argument of instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }
  /// Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins)
      return nullptr;
    // ...
      return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    }
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// Get the origin for the i-th argument of instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// Remember the place where a shadow check should be inserted.
  ///
  /// This location will later be instrumented with a check that reports a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks)
      return;

    if (!DebugCounter::shouldExecute(DebugInsertCheck)) {
      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");
      return;
    }

    Type *ShadowTy = Shadow->getType();
    if (isScalableNonVectorType(ShadowTy)) {
      LLVM_DEBUG(dbgs() << "Skipping check of scalable non-vector " << *Shadow
                        << " before " << *OrigIns << "\n");
      return;
    }

    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
            isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
           "Can only insert checks for integer, vector, and aggregate shadow "
           "types");
    // ...
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// Remember the place where a shadow check for Val should be inserted.
  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow)
        return;
      Origin = getOrigin(Val);
    } else {
      // ...
    }
    insertCheckShadow(Shadow, Origin, OrigIns);
  }
  static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
    switch (AO) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    // ...
  }

  Constant *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
    // ...
  }
  static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {
    switch (AO) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    // ...
  }

  Constant *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
    // ...
  }
  // ------------------- Visitors.
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))
      return;
    // Don't want to visit if we're in the prologue.
    if (isInPrologue(I))
      return;
    if (!DebugCounter::shouldExecute(DebugInstrumentInstruction)) {
      // We still have to set the shadow and origin to clean values.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }
    // ...
  }
  /// Instrument LoadInst.
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
      // ...
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheckShadowOf(I.getPointerOperand(), &I);
    // ...
    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        // ...
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }
  /// Instrument StoreInst: stores are queued and materialized later.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
    if (ClCheckAccessAddress)
      insertCheckShadowOf(I.getPointerOperand(), &I);
  }

  void handleCASOrRMW(Instruction &I) {
    // ...
    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
                                          /*isStore*/ true)
                           .first;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertCheckShadowOf(Val, &I);

    IRB.CreateStore(getCleanShadow(Val), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
                                          "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1,
                                          I.getShuffleMask(), "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    // Special case: do not instrument the (single allowed) bitcast between
    // a musttail call and a ret; no instructions may follow a musttail call.
    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
  Value *handleBitwiseAnd(IRBuilder<> &IRB, Value *V1, Value *V2, Value *S1,
                          Value *S2) {
    // "And" of 0 and a poisoned value results in an unpoisoned value:
    //   1&1 => 1;     0&1 => 0;     p&1 => p;
    //   1&0 => 0;     0&0 => 0;     p&0 => 0;
    //   1&p => p;     0&p => 0;     p&p => p;
    //
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    return IRB.CreateOr({S1S2, V1S2, S1V2});
  }

  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *OutShadow = handleBitwiseAnd(IRB, V1, V2, S1, S2);
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in an unpoisoned value:
    //   1|1 => 1;     0|1 => 1;     p|1 => 1;
    //   1|0 => 1;     0|0 => 0;     p|0 => p;
    //   1|p => 1;     0|p => p;     p|p => p;
    // ...
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    // ...
    // With the "disjoint" flag, a 1 in both operands makes the result
    // poison; optionally model that extra shadow precisely.
      // ...
      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");
    // ...
    setOriginForNaryOp(I);
  }
  /// A templated class that combines shadow and/or origin values of several
  /// operands into one.
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

    /// Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        // ...
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        // ...
      }

      if (MSV->MS.TrackOrigins) {
        // ...
        // No point in adding something that might result in 0 origin value.
        if (!ConstOrigin || !ConstOrigin->isNullValue()) {
          Value *Cond = MSV->convertToBool(OpShadow, IRB);
          // ...
        }
      }
      return *this;
    }

    /// Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// Set the current combined values as the given instruction's shadow
    /// and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        // ...
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        // ...
        MSV->setOrigin(I, Origin);
      }
    }

    /// Store the current combined value at the specified origin location.
    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {
        // ...
      }
    }
  };

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;
  /// Propagate origin for an arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
      return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())
      OC.Add(Op.get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    // ...
  }

  /// Cast between two shadow types, extending or truncating as necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    // ...
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)
      return IRB.CreateICmpNE(V, getCleanShadow(V));
    // ...
  }

  /// Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    return IRB.CreateBitCast(V, ShadowTy);
  }

  /// Propagate shadow for an arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
      SC.Add(Op.get());
    SC.Done(&I);
  }
  /// OR adjacent groups of shadow elements together ("horizontal" reduce),
  /// optionally across two input vectors split into shards.
  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
                          unsigned Shards, Value *VectorA, Value *VectorB) {
    // ...
    [[maybe_unused]] unsigned TotalNumElems = NumElems;
    // ...
    assert(NumElems % (ReductionFactor * Shards) == 0);
    // ...
    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;
      // ...
      for (unsigned j = 0; j < Shards; j++) {
        unsigned Offset = NumElems / Shards * j;
        // ...
        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor) {
          // ...
        }
        // ...
        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor) {
          // ...
        }
      }
      // ...
    }
    // ...
  }
  /// Handle horizontal pairwise intrinsics by OR'ing the shadows of each
  /// pair of adjacent elements.
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    // ...
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());
    // ...
    IRBuilder<> IRB(&I);
    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, 2, Shards,
                                       FirstArgShadow, SecondArgShadow);
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }
  /// As above, but first reinterprets the operand shadows with the given
  /// element width.
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    // ...
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());
    // ...
    FixedVectorType *ReinterpretShadowTy = nullptr;
    // ...
    IRBuilder<> IRB(&I);
    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);
    // ...
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
    }

    Value *OrShadow = horizontalReduce(I, 2, Shards,
                                       FirstArgShadow, SecondArgShadow);
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }

  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
  /// Multiplication by a constant: shadow propagates through the constant's
  /// trailing zero bits.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
      Type *EltTy = VTy->getElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
                dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
        } else {
          Elements.push_back(ConstantInt::get(EltTy, 1));
        }
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
      } else {
        ShadowMul = ConstantInt::get(Ty, 1);
      }
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }
  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }

  void handleIntegerDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }

  // Floating point division is side-effect free, so we cannot require that
  // the divisor be fully initialized; propagate shadow instead.
  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
  /// Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of
  /// the arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    // ...
    setOriginForNaryOp(I);
  }

  /// Instrument relational comparisons.
  ///
  /// Does exact shadow propagation for relational comparisons of integers,
  /// pointers, and vectors of those.
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    // ...
    bool IsSigned = I.isSigned();

    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
      if (IsSigned) {
        // Flip the sign bit to map the signed range onto the unsigned one.
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      }
      // ...
      return std::make_pair(Min, Max);
    };

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
    // ...
    setOriginForNaryOp(I);
  }
  /// Instrument signed relational comparisons.
  ///
  /// Handles sign bit tests (x<0, x>=0, x<=-1, x>-1) by propagating the
  /// shadow sign bit.
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp;
    Value *op = nullptr;
    CmpInst::Predicate pre;
    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
      op = I.getOperand(0);
      pre = I.getPredicate();
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
    } else {
      handleShadowOr(I);
      return;
    }

    if ((constOp->isNullValue() &&
         (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
        (constOp->isAllOnesValue() &&
         (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
      IRBuilder<> IRB(&I);
      Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
                                        "_msprop_icmp_s");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if (isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  void handleFunnelShift(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S0 and S1.
    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S2 = getShadow(&I, 2);
    Value *S2Conv =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
    Value *V2 = I.getOperand(2);
    Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), S2Conv->getType(),
                                       {S0, S1, V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
  /// Instrument llvm.memmove.
  ///
  /// At this point we don't know whether llvm.memmove will be inlined or
  /// not; replace it with a call to the __msan_memmove interceptor so the
  /// shadow is always copied along with the data.
  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1)); // Ensure shadow initialized.
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemmoveFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1)); // Ensure shadow initialized.
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemcpyFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(
        MS.MemsetFn,
        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }
  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
  /// Handle vector store-like intrinsics: writes memory, one pointer
  /// argument and one vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
    IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
    return true;
  }

  /// Handle vector load-like intrinsics: reads memory, one pointer argument,
  /// returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      const Align Alignment = Align(1);
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
      // ...
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }
  /// Handle (SIMD arithmetic)-like intrinsics: arguments of the same type as
  /// the return type, plus a specified number of trailing flags of any type.
  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();
    // ...
    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

  /// Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly SIMD intrinsics.
  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)
      return false;

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))
        return true;

    // ...
    return false;
  }

  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
      // ...
      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
                        << "\n");
      return true;
    }
    return false;
  }
  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleLifetimeStart(IntrinsicInst &I) {
    if (!PoisonStack)
      return;
    // ...
    if (AI)
      LifetimeStartList.push_back(std::make_pair(&I, AI));
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    setShadow(&I, IRB.CreateIntrinsic(Intrinsic::bswap, ArrayRef(&OpType, 1),
                                      getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  /// Instrument count-leading/trailing-zeros intrinsics: the result is
  /// poisoned only if the poisoned input bits could change the count.
  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);
    // ...
    Value *ConcreteZerosCount = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {Src, False});
    Value *ShadowZerosCount = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {SrcShadow, False});
    // ...
    Value *CompareConcreteZeros = IRB.CreateICmpUGE(
        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    Value *NotAllZeroShadow =
        // ...
    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");
    // ...
    OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");
    // ...
    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  }
  /// Handle NEON vector convert intrinsics.
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I, bool FixedPoint) {
    // ...
    IRBuilder<> IRB(&I);
    Value *S0 = getShadow(&I, 0);
    // ...
    if (FixedPoint) {
      Value *Precision = I.getOperand(1);
      insertCheckShadowOf(Precision, &I);
    }
    // ...
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }

  /// Shrink the shadow type if the intrinsic only uses the lower half of the
  /// source vector.
  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {
    // ...
  }

  /// Doubles the length of a vector shadow (filled with zeros) if necessary
  /// to match the length of the shadow for the instruction.
  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    // ...
    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =
        cast<FixedVectorType>(Shadow->getType())->getNumElements();
    unsigned FullShadowNumElems =
        cast<FixedVectorType>(FullShadow->getType())->getNumElements();

    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
    } else {
      SmallVector<int, 16> ShadowMask(FullShadowNumElems);
      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
      // ...
    }
    return FullShadow;
  }

  /// Instrument SSE vector convert intrinsics that change the vector width,
  /// propagating shadow through the conversion.
  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {
      // ...
    }

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());
    // ...
    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);

    IRBuilder<> IRB(&I);
    Value *S0 = getShadow(&I, 0);
    // ...
    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);

    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);
  }
  /// Instrument SSE vector convert intrinsics: copy the (un)initializedness
  /// of CopyOp's elements and check the converted elements.
  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
            isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;
    // ...
    }

    // ...
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    // ...
    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

    // ...
    if (CopyOp) {
      Value *ResultShadow = getShadow(CopyOp);
      // ...
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }

  // Given a scalar or vector, extract the lower 64 bits (or less), and
  // return all zeroes if it is zero, all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    // ...
    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    // ...
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    // ...
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }
  /// Instrument vector shift intrinsics (e.g. int_x86_avx2_psll_w) that
  /// shift the whole vector by a scalar amount.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                                  {IRB.CreateBitCast(S1, V1->getType()), V2});
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  /// Get an MMX-sized (default 64-bit) vector type with the given element
  /// size.
  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
    return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                                X86_MMXSizeInBits / EltSizeInBits);
  }

  /// Returns the signed counterpart of an (un)signed-saturate-and-pack
  /// intrinsic.
  Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
    switch (id) {
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;

    default:
      llvm_unreachable("unexpected intrinsic id");
    }
  }
  /// Instrument vector pack intrinsics (e.g. x86_mmx_packsswb) that pack
  /// elements of two input vectors into half as many bits with saturation.
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());

    // SExt and ICmpNE below must apply to individual elements of the input
    // vectors. In the MMX case, cast to an appropriate vector type and back.
    Type *T =
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
      S1 = IRB.CreateBitCast(S1, T);
      S2 = IRB.CreateBitCast(S2, T);
    }
    Value *S1_ext =
        IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
    Value *S2_ext =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
    if (MMXEltSizeInBits) {
      // ...
    }
    Value *S =
        IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()), {},
                            {S1_ext, S2_ext}, nullptr, "_msprop_vector_pack");
    if (MMXEltSizeInBits)
      S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Build a boolean vector from the low bits of Mask.
  Constant *createDppMask(unsigned Width, unsigned Mask) {
    SmallVector<Constant *, 4> R(Width);
    // ...
    return ConstantVector::get(R);
  }

  Value *findDppPoisonedOutput(IRBuilder<> &IRB, Value *S, unsigned SrcMask,
                               unsigned DstMask) {
    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();
    // ...
    Value *DstMaskV = createDppMask(Width, DstMask);
    // ...
  }

  /// Instrument x86 dot-product-packed (dpps/dppd) intrinsics.
  void handleDppIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(S0, S1);

    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();
    assert(Width == 2 || Width == 4 || Width == 8);

    // ...
    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
    // ...
      SI1 = IRB.CreateOr(
          SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    // ...
    setOriginForNaryOp(I);
  }

  Value *convertBlendvToSelectMask(IRBuilder<> &IRB, Value *C) {
    C = CreateAppToShadowCast(IRB, C);
    // ...
  }
  /// Instrument x86 blendv intrinsics (blendvpd, blendvps, pblendvb).
  void handleBlendvIntrinsic(IntrinsicInst &I) {
    Value *C = I.getOperand(2);
    Value *T = I.getOperand(1);
    Value *F = I.getOperand(0);

    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;

    {
      IRBuilder<> IRB(&I);
      // Extract the top bit from the condition and its shadow.
      C = convertBlendvToSelectMask(IRB, C);
      Sc = convertBlendvToSelectMask(IRB, Sc);

      setShadow(C, Sc);
      setOrigin(C, Oc);
    }

    handleSelectLikeInst(I, C, T, F);
  }

  /// Instrument sum-of-absolute-differences intrinsics.
  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    Type *ResTy = IsMMX ? getMMXVectorTy(64) : I.getType();
    unsigned ZeroBitsPerResultElement =
        ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;

    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
  /// Instrument dot-product style intrinsics that multiply adjacent elements
  /// and add them pairwise, possibly into an accumulator.
  void handleVectorDotProductIntrinsic(IntrinsicInst &I,
                                       unsigned ReductionFactor,
                                       unsigned EltSizeInBits = 0) {
    IRBuilder<> IRB(&I);
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());
    // ...
    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      Va = I.getOperand(0);
      Vb = I.getOperand(1);

      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {
      // Operand 0 is the accumulator.
      Va = I.getOperand(1);
      Vb = I.getOperand(2);

      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);
    }
    // ...
    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =
          cast<FixedVectorType>(I.getOperand(0)->getType());
      assert(AccumulatorType == ReturnType);
    }

    FixedVectorType *ImplicitReturnType = ReturnType;
    // ...
    if (EltSizeInBits) {
      ImplicitReturnType = cast<FixedVectorType>(getMMXVectorTy(
          EltSizeInBits * ReductionFactor, /* ... */));
      // ... (the pre-reduction shadow vector has
      //      ReturnType->getNumElements() * ReductionFactor elements)
    }
    // ...
    // A product is poisoned only if neither operand's contribution is a
    // provably defined zero:
    VaInt = CreateAppToShadowCast(IRB, Va);
    VbInt = CreateAppToShadowCast(IRB, Vb);
    // ...
    And = handleBitwiseAnd(IRB, VaNonZero, VbNonZero, SaNonZero, SbNonZero);
    // ... (horizontalReduce over the product shadows, then bitcast to
    //      ImplicitReturnType)
    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    // An uninitialized accumulator poisons the output.
    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  /// Instrument compare-packed intrinsics: an OR followed by
  /// sext(icmp ne 0) to produce an all-zeros or all-ones shadow per element.
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Instrument compare-scalar intrinsics: handles both cmp* (result in the
  /// first vector element) and comi* (result as i32).
  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle reduce-to-scalar intrinsics: the scalar shadow is the OR of all
  /// element shadows.
  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));
    // ...
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle reductions with a starter value: the starter's shadow joins the
  /// OR.
  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = IRB.CreateOrReduce(getShadow(&I, 1));
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle llvm.vector.reduce.or: a result bit is defined if any element
  /// contributes a defined 1, or if every element's bit is defined.
  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    // ...
    setOrigin(&I, getOrigin(&I, 0));
  }

  /// Handle llvm.vector.reduce.and: symmetric to the OR case, with a defined
  /// 0 in any element making the result bit defined.
  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    // ...
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleStmxcsr(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    Value *ShadowPtr =
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;

    IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);
  }

  void handleLdmxcsr(IntrinsicInst &I) {
    if (!InsertChecks)
      return;

    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    const Align Alignment = Align(1);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
                                    : getCleanOrigin();
    insertCheckShadow(Shadow, Origin, &I);
  }
  void handleMaskedExpandLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(0);
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, /*isStore*/ false);

    Value *Shadow =
        IRB.CreateMaskedExpandLoad(ShadowTy, ShadowPtr, Align, Mask,
                                   getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);

    // TODO: Store origins.
    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedCompressStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    MaybeAlign Align = I.getParamAlign(1);
    Value *Mask = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, /*isStore*/ true);

    IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Align, Mask);

    // TODO: Store origins.
  }
  void handleMaskedGather(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Mask, &I);
      // Check only the enabled lanes' pointer shadows.
      // ...
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ false);

    Value *Shadow =
        IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
                               getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);

    // TODO: Store origins.
    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedScatter(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    Value *Mask = I.getArgOperand(2);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Mask, &I);
      // ...
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
    }

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ true);

    IRB.CreateMaskedScatter(Shadow, ShadowPtrs, Alignment, Mask);

    // TODO: Store origins.
  }
  void handleMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *V = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    Value *Mask = I.getArgOperand(2);
    Value *Shadow = getShadow(V);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *ShadowPtr;
    Value *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);

    IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,
                DL.getTypeStoreSize(Shadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }
  void handleMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore*/ false);
    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                       getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
      return;

    // Choose between the PassThru origin and the loaded origin, depending on
    // which lanes actually contribute poisoned shadow.
    Value *MaskedPassThruShadow = IRB.CreateAnd(
        getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
    Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
    Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
    setOrigin(&I, Origin);
  }
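  // AVX/AVX2 maskload/maskstore predate the generic masked intrinsics and use
  // an integer vector mask (the sign bit of each element). Rather than
  // re-deriving an i1 mask, the handlers below re-issue the same AVX
  // intrinsic against shadow memory, letting the original lane-selection
  // semantics apply to the shadow as well.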
  void handleAVXMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    Value *Src = I.getArgOperand(2);

    const Align Alignment = Align(1);

    Value *SrcShadow = getShadow(Src);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Dst, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }
  void handleAVXMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Value *Mask = I.getArgOperand(1);

    const Align Alignment = Align(1);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Mask, &I);

    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore*/ false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
    // The intrinsic may return a floating-point vector; cast back to the
    // integer shadow type.
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  void maskedCheckAVXIndexShadow(IRBuilder<> &IRB, Value *Idx, Instruction *I) {
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize =
        cast<FixedVectorType>(Idx->getType())->getNumElements();
    // Only the low bits of each index element participate in lane selection;
    // mask the index shadow down to those bits before checking it.
    auto *IdxShadow = getShadow(Idx);
    Value *Truncated = IRB.CreateAnd(
        IdxShadow, ConstantInt::get(Idx->getType(), IdxVectorSize - 1));
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }
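  // For variable permutes, each output lane takes whatever shadow the
  // selected input lane carries, so the handlers below re-apply the same
  // permutation intrinsic to the (bitcast) shadow operands after checking the
  // relevant index bits via maskedCheckAVXIndexShadow above.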
  void handleAVXVpermilvar(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);

    // Shadows are integer-ish types but some intrinsics require a
    // floating-point type.
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {Shadow, I.getArgOperand(1)});

    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
  void handleAVXVpermi2var(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);

    // Shadows are integer-ish types but some intrinsics require a
    // floating-point type.
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {AShadow, Idx, BShadow});

    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }
  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }

    assert(isFixedFPVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);

    assert(WriteThruNumElements == MaskNumElements);

    insertCheckShadowOf(Mask, &I);

    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // The mask selects from a vector twice as wide as A; the extended half
      // of the shadow is already zeroed above.
      // ...
    }
    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), MaskNumElements),
        "_ms_mask_bitcast");
    AShadow = IRB.CreateBitCast(AShadow, getShadowTy(&I), "_ms_a_shadow");

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow,
                                     "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
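  // BMI bit-manipulation intrinsics (bextr/bzhi/pdep/pext): the control
  // operand influences every result bit, so any poison in it poisons the
  // whole result, while the data operand's shadow is transformed by applying
  // the same intrinsic to it.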
  void handleBmiIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);

    // If any bit of the mask operand is poisoned, the whole thing is poisoned.
    Value *SMask = getShadow(&I, 1);
    SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(&I)),
                           ShadowTy);
    // Apply the same intrinsic to the shadow of the first operand.
    Value *S = IRB.CreateCall(I.getCalledFunction(),
                              {getShadow(&I, 0), I.getOperand(1)});
    setShadow(&I, IRB.CreateOr(SMask, S));
    setOriginForNaryOp(I);
  }
  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      Mask.append(2, X);
    }
    return Mask;
  }
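  // Carry-less multiplication multiplies one 64-bit half of each operand,
  // selected by the immediate. getPclmulMask above duplicates the
  // participating halves so that only their shadows feed the result.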
  void handlePclmulIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    SOC.Done(&I);
  }
  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    // First element of second operand, remaining elements of first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // The vtest/ptest family collapses whole vectors into a single flag, so any
  // poisoned bit in either operand poisons the scalar result.
  void handleVtestIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *Or = IRB.CreateOr(Shadow0, Shadow1);
    Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
    Value *Scalar = convertShadowToScalar(NZ, IRB);
    Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    Value *OrShadow = IRB.CreateOr(First, Second);
    // First element of both operands OR'd together, remaining elements of
    // first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Rounding is element-wise and value-preserving for shadow purposes.
  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    SC.Done(&I);
  }
  void handleAbsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());

    Value *SrcShadow = getShadow(Src);

    // If the int_min_is_poison flag is set, abs(INT_MIN) is poison.
    APInt MinVal =
        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);

    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow = IRB.CreateSelect(
        IRB.CreateICmpEQ(Src, MinValVec), PoisonedShadow, SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleIsFpClass(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }
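  // *.with.overflow intrinsics return {result, overflow-bit}: the result
  // shadow is the OR of the operand shadows, and the i1 overflow bit is
  // poisoned iff any input bit is poisoned.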
  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));

    Value *Shadow = PoisonValue::get(getShadowTy(&I));
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // ...
    Value *Shadow = getShadow(V);
  // ...
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);

    // The mask has one bit per element of A; widen it to match the output
    // vector when the output has twice as many elements.
    if (ANumElements != OutputNumElements) {
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
                            "_ms_widen_mask");
    }

    Value *AShadow = getShadow(A);
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);
    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(
        IRB.CreateBitCast(
            Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements)),
        AShadow, WriteThroughShadow, "_ms_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
                                         unsigned WriteThruIndex,
                                         unsigned MaskIndex) {
    IRBuilder<> IRB(&I);

    unsigned NumArgs = I.arg_size();
    assert(AIndex < NumArgs);
    assert(WriteThruIndex < NumArgs);
    assert(MaskIndex < NumArgs);
    assert(AIndex != WriteThruIndex);
    assert(AIndex != MaskIndex);
    assert(WriteThruIndex != MaskIndex);

    Value *A = I.getOperand(AIndex);
    Value *WriteThru = I.getOperand(WriteThruIndex);
    Value *Mask = I.getOperand(MaskIndex);

    assert(isFixedFPVector(A));
    assert(isFixedFPVector(WriteThru));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThru->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    // Every other operand (e.g., rounding mode) must be a fully-initialized
    // scalar.
    for (unsigned i = 0; i < NumArgs; ++i) {
      if (i != AIndex && i != WriteThruIndex) {
        assert(I.getOperand(i)->getType()->isIntegerTy());
        insertCheckShadowOf(I.getOperand(i), &I);
      }
    }

    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));

    Value *AShadow = getShadow(A);
    Value *WriteThruShadow = getShadow(WriteThru);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThruShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);

    insertCheckShadowOf(Mask, &I);

    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);

    // Only the lowest element is computed; the rest pass through from A.
    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);

    // Only the lowest bit of the mask matters for the scalar operation.
    Value *MaskLower = IRB.CreateTrunc(Mask, IRB.getInt1Ty());

    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
        "_msdst_shadow");

    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }
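  // GF(2^8) affine transform computes A*x + b over GF(2). Shadow is
  // propagated by evaluating the transform with each operand's shadow
  // substituted in turn (a poisoned bit anywhere in a contributing row or
  // column can reach the output), then OR'ing in a broadcast of the
  // immediate's shadow.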
  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *X = I.getArgOperand(0);
    Value *A = I.getArgOperand(1);
    Value *B = I.getArgOperand(2);

    assert(isFixedIntVector(A));
    assert(cast<FixedVectorType>(A->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);

    assert(A->getType() == X->getType());

    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);

    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    CallInst *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    CallInst *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {X, AShadow, BZeroShadow});
    CallInst *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {XShadow, A, BZeroShadow});

    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    unsigned NumElements =
        cast<FixedVectorType>(I.getType())->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }
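  // NEON structured loads/stores (ld1x2..ld4r, st1x2..st4lane) move several
  // vectors at once; the handlers below re-issue the same intrinsic against
  // shadow memory, passing lane numbers verbatim (after checking them).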
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();

    // Return type is a struct of vectors of integers or floating-point types.
    assert(I.getType()->isStructTy());

    IRBuilder<> IRB(&I);
    SmallVector<Value *, 6> ShadowArgs;

    if (WithLane) {
      // 2, 3 or 4 vectors, plus lane number, plus input pointer.
      assert(4 <= numArgs && numArgs <= 6);

      // Pass the shadows of the vector operands.
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      // The lane number is passed verbatim, after being checked.
      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      ShadowArgs.push_back(LaneNumber);
      insertCheckShadowOf(LaneNumber, &I);
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore*/ false);
    ShadowArgs.push_back(SrcShadowPtr);

    CallInst *CI =
        IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, CI);

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    IRBuilder<> IRB(&I);

    // Don't use getNumOperands() because it includes the callee.
    int numArgOperands = I.arg_size();

    // The last arg operand is the output (pointer).
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    assert(Addr->getType()->isPointerTy());
    int skipTrailingOperands = 1;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // The second-to-last operand is the lane number (for vst{2,3,4}lane).
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      assert(isa<IntegerType>(
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
    }

    SmallVector<Value *, 8> ShadowArgs;
    // All the initial operands are the inputs.
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }

    // The width of the output is that of one input vector times the number of
    // input vectors.
    FixedVectorType *OutputVectorTy = FixedVectorType::get(
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
        cast<FixedVectorType>(I.getArgOperand(0)->getType())
                ->getNumElements() *
            (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(
          1, I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // AArch64 NEON does not need alignment (unless OS requires it).
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
    ShadowArgs.append(1, OutputShadowPtr);

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (MS.TrackOrigins) {
      // TODO: if we modelled the vst* instruction more precisely, we could
      // more accurately track the origins.
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }
  void handleNEONMatrixMultiply(IntrinsicInst &I, unsigned int ARows,
                                unsigned int ACols, unsigned int BRows,
                                unsigned int BCols) {
    IRBuilder<> IRB(&I);

    Value *R = I.getArgOperand(0);
    Value *A = I.getArgOperand(1);
    Value *B = I.getArgOperand(2);

    assert(I.getType() == R->getType());
    // ... (dimension checks elided)

    Value *ShadowR = getShadow(&I, 0);
    Value *ShadowA = getShadow(&I, 1);
    Value *ShadowB = getShadow(&I, 2);

    // Each output element depends on a full row of A and a full column of B,
    // so apply the intrinsic to the shadows with a clean accumulator, then
    // fold the accumulator's shadow back in.
    Value *ShadowAB = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {getCleanShadow(R), ShadowA, ShadowB});

    setShadow(&I, IRB.CreateOr(ShadowAB, ShadowR));
    setOriginForNaryOp(I);
  }
  // Generic fallback for "data-movement" intrinsics: re-run the intrinsic
  // (or a substitute) on the operand shadows, passing the last
  // trailingVerbatimArgs operands unchanged, then OR in the shadows of those
  // verbatim operands.
  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID ShadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    IRBuilder<> IRB(&I);

    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.push_back(Shadow);
    }

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Arg = I.getArgOperand(i);
      ShadowArgs.push_back(Arg);
    }

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), ShadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // Combine the computed shadow with the shadows of the verbatim args.
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }

    setShadow(&I, CreateShadowCast(IRB, CombinedShadow, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    handleShadowOr(I);
  }
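  // Intrinsic dispatch is split into three tiers: target-independent
  // intrinsics first, then x86 SIMD, then Arm SIMD. Each maybeHandle*
  // function returns true if it instrumented the call; anything left over
  // falls through to the heuristic/strict default handling in
  // visitInstruction.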
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/0);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, /*AllowShadowCast=*/false);
      break;
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      // ...
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }
  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;
    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;
    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }
    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, false);
      break;
    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }
    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, true);
      break;
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;
    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;
    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;
    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;
    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorDotProductIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorDotProductIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorDotProductIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssds_512:
    case Intrinsic::x86_avx2_vpdpbsud_128:
    case Intrinsic::x86_avx2_vpdpbsud_256:
    case Intrinsic::x86_avx10_vpdpbsud_512:
    case Intrinsic::x86_avx2_vpdpbsuds_128:
    case Intrinsic::x86_avx2_vpdpbsuds_256:
    case Intrinsic::x86_avx10_vpdpbsuds_512:
    case Intrinsic::x86_avx2_vpdpbuud_128:
    case Intrinsic::x86_avx2_vpdpbuud_256:
    case Intrinsic::x86_avx10_vpdpbuud_512:
    case Intrinsic::x86_avx2_vpdpbuuds_128:
    case Intrinsic::x86_avx2_vpdpbuuds_256:
    case Intrinsic::x86_avx10_vpdpbuuds_512:
      handleVectorDotProductIntrinsic(I, 4, /*...*/);
      break;
    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
    case Intrinsic::x86_avx2_vpdpwsud_128:
    case Intrinsic::x86_avx2_vpdpwsud_256:
    case Intrinsic::x86_avx10_vpdpwsud_512:
    case Intrinsic::x86_avx2_vpdpwsuds_128:
    case Intrinsic::x86_avx2_vpdpwsuds_256:
    case Intrinsic::x86_avx10_vpdpwsuds_512:
    case Intrinsic::x86_avx2_vpdpwusd_128:
    case Intrinsic::x86_avx2_vpdpwusd_256:
    case Intrinsic::x86_avx10_vpdpwusd_512:
    case Intrinsic::x86_avx2_vpdpwusds_128:
    case Intrinsic::x86_avx2_vpdpwusds_256:
    case Intrinsic::x86_avx10_vpdpwusds_512:
    case Intrinsic::x86_avx2_vpdpwuud_128:
    case Intrinsic::x86_avx2_vpdpwuud_256:
    case Intrinsic::x86_avx10_vpdpwuud_512:
    case Intrinsic::x86_avx2_vpdpwuuds_128:
    case Intrinsic::x86_avx2_vpdpwuuds_256:
    case Intrinsic::x86_avx10_vpdpwuuds_512:
      handleVectorDotProductIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_avx512bf16_dpbf16ps_128:
    case Intrinsic::x86_avx512bf16_dpbf16ps_256:
    case Intrinsic::x86_avx512bf16_dpbf16ps_512:
      handleVectorDotProductIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;
    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;
    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;
    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;
    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;
    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
      handlePairwiseShadowOrIntrinsic(I, 1, /*...*/);
      break;
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_avx2_phsub_w:
      handlePairwiseShadowOrIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
      handlePairwiseShadowOrIntrinsic(I, 1, /*...*/);
      break;
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_avx2_phsub_d:
      handlePairwiseShadowOrIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
      handlePairwiseShadowOrIntrinsic(I, 1, /*...*/);
      break;
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_avx2_phsub_sw:
      handlePairwiseShadowOrIntrinsic(I, 2, /*...*/);
      break;
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256:
      handlePairwiseShadowOrIntrinsic(I, 2);
      break;
    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }
    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }
    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      [[maybe_unused]] bool Success =
          maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }
    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(), /*...*/);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      // Saturation does not change which input bits can affect the output;
      // reuse the plain truncating pmov handler.
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_dw_512, /*...*/);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_db_512, /*...*/);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_qb_512, /*...*/);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_qw_512, /*...*/);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      handleAVX512VectorDownConvert(I);
      break;
    }
    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1, /*MaskIndex=*/2);
      break;
    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1, /*MaskIndex=*/2);
      break;
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
    case Intrinsic::x86_avx512_mask_rndscale_ps_512:
    case Intrinsic::x86_avx512_mask_rndscale_ps_256:
    case Intrinsic::x86_avx512_mask_rndscale_ps_128:
    case Intrinsic::x86_avx512_mask_rndscale_pd_512:
    case Intrinsic::x86_avx512_mask_rndscale_pd_256:
    case Intrinsic::x86_avx512_mask_rndscale_pd_128:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 2, /*MaskIndex=*/3);
      break;
    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }
    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;
    default:
      return false;
    }
    return true;
  }
  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      // Not exactly a shift, but close enough for shadow propagation.
      handleVectorShiftIntrinsic(I, false);
      break;
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    case Intrinsic::aarch64_neon_addp:
    case Intrinsic::aarch64_neon_faddp:
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    }
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtxn:
    case Intrinsic::aarch64_neon_vcvthf2fp:
    case Intrinsic::aarch64_neon_vcvtfp2hf:
      handleNEONVectorConvertIntrinsic(I, false);
      break;
    case Intrinsic::aarch64_neon_vcvtfxs2fp:
    case Intrinsic::aarch64_neon_vcvtfp2fxs:
    case Intrinsic::aarch64_neon_vcvtfxu2fp:
    case Intrinsic::aarch64_neon_vcvtfp2fxu:
      handleNEONVectorConvertIntrinsic(I, true);
      break;
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, true);
      break;
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, false);
      break;
    }
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, true);
      break;
    }
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      // ...
      break;
    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, false);
      break;
    }
    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, true);
      break;
    }
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      // The last trailing argument (index register) is verbatim.
      handleIntrinsicByApplyingToShadow(
          I, I.getIntrinsicID(), /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }
    case Intrinsic::aarch64_neon_smmla:
    case Intrinsic::aarch64_neon_ummla:
    case Intrinsic::aarch64_neon_usmmla:
      handleNEONMatrixMultiply(I, 2, 8, 8, 2);
      break;
    case Intrinsic::aarch64_neon_sdot:
    case Intrinsic::aarch64_neon_udot:
      handleVectorDotProductIntrinsic(I, 4, /*...*/);
      break;
    case Intrinsic::aarch64_neon_bfdot:
      handleVectorDotProductIntrinsic(I, 2, /*...*/);
      break;
    default:
      return false;
    }
    return true;
  }
  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }
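  // Calls into libatomic cannot be instrumented by recompilation, so
  // __atomic_load/__atomic_store are modeled directly: shadow is copied
  // alongside the data, and the ordering argument is strengthened so that
  // the shadow accesses cannot be reordered past the atomic operation itself.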
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have CB terminate the BB.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure the
    // shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }
  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure the
    // shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    // Atomic store always paints clean shadow/origin.
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }
  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check the argument shadow and
      // mark all outputs as clean.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need to have special handling because there
      // isn't a good way to intercept them or compile the library with
      // instrumentation.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (Function *Func = CB.getCalledFunction()) {
      // Clear out the readonly/readnone attributes: after instrumentation,
      // the callee updates shadow state as a side effect.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
      Func->removeFnAttrs(B);
    }

    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // of TSan or MSan and access shadow directly.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();
      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
        ArgOffset += alignTo(Size, kShadowTLSAlignment);
        continue;
      }

      Value *Store = nullptr;
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
      LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                        << " Shadow: " << *ArgShadow << "\n");
      if (ByVal) {
        // ByVal requires some special handling as it's too big for a single
        // load.
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = DL.getTypeAllocSize(CB.getParamByValType(i));
        MaybeAlign Alignment = std::nullopt;
        if (MaybeAlign ParamAlignment = CB.getParamAlign(i))
          Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
        Value *AShadowPtr, *AOriginPtr;
        std::tie(AShadowPtr, AOriginPtr) =
            getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ false);
        if (!PropagateShadow) {
          // ParamTLS overflow.
          Store = IRB.CreateMemSet(ArgShadowBase,
                                   Constant::getNullValue(IRB.getInt8Ty()),
                                   Size, Alignment);
        } else {
          Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                   Alignment, Size);
          if (MS.TrackOrigins) {
            Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
            IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                             kMinOriginAlignment, Size);
          }
        }
      } else {
        Size = DL.getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
        Constant *Cst = dyn_cast<Constant>(ArgShadow);
        if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
          IRB.CreateStore(getOrigin(A),
                          getOriginPtrForArgument(IRB, ArgOffset));
        }
      }
      assert(Store != nullptr);
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    if (CB.getType()->isVoidTy())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall()) {
      // A musttail call's retval shadow is propagated by the caller's return
      // instruction instead.
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }
    Instruction *NextInsn = CB.getNextNode();
    assert(NextInsn &&
           "Could not find insertion point for retval shadow load");
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
        kShadowTLSAlignment, "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  bool isAMustTailRetVal(Value *RetVal) {
    // A tail call may have a bitcast between the call and the return.
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
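  // Return values travel through a dedicated retval TLS slot: the caller
  // zeroes it before the call and reads it back afterwards, while the
  // callee's return instruction (below) writes the return value's shadow
  // into it unless eager checks make that unnecessary.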
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // main() is treated as always noundef.
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }
  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false,
                              GlobalValue::PrivateLinkage, IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    return createPrivateConstGlobalForString(*F.getParent(), I.getName());
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack) {
      Value *Descr = getLocalVarDescription(I);
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }
  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the stack level.
    AllocaSet.insert(&I);
  }
  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();
    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if the condition shadow is poisoned.
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra select over the fully poisoned shadow.
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else if (isScalableNonVectorType(I.getType())) {
      Sa1 = getCleanShadow(getShadowTy(I.getType()));
    } else {
      // If the condition shadow is poisoned, bits that are equal in c and d
      // (and unpoisoned there) may still be reported as initialized.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      // ...
    }
    // ...

    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }
  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  void visitResumeInst(ResumeInst &I) {
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    // Nothing to do here.
  }
  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // For each assembly argument, we check its value for being initialized.
    // If the argument is a pointer, we assume it points to a single element
    // of the corresponding type, and that element is unpoisoned for outputs.
    Type *OpType = Operand->getType();
    if (!OpType->isPointerTy() || !isOutput) {
      assert(!isOutput);
      insertCheckShadowOf(Operand, &I);
      return;
    }
    if (!ElemTy || !ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy does not encode the pointer alignment; conservatively assume
      // the shadow memory is unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
    }
  }
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }
  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments.
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    int NumOperands = CB->arg_size();
    const DataLayout &DL = F.getDataLayout();
    int OutputArgs = getNumOutputArgs(IA, CB);

    // Check input arguments. Doing so before unpoisoning output arguments, so
    // that we won't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual InlineAsm
    // call, so that the checks aren't overwritten.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitInstruction(Instruction &I) {
    // Everything else: check each operand, mark the result as fully
    // initialized (strict semantics).
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
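// Variadic calls cannot use per-argument TLS slots the way named arguments
// do, so each target has a VarArgHelper that mirrors its va_list layout. The
// shared base class below manages the va_arg TLS area and unpoisons the
// va_list tag on va_start/va_copy.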
struct VarArgHelperBase : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    return IRB.CreatePtrAdd(
        MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    // Make sure we don't overflow kParamTLSSize.
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset),
                            "_msarg_va_o");
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // Zero out the remaining tail of the TLS area past this argument.
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }
  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
/// AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelperBase {
  // 6 GP registers * 8 bytes.
  static const unsigned AMD64GpEndOffset = 48;
  // 48 bytes of GP registers plus 8 SSE registers * 16 bytes.
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, 24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
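  // System V AMD64: the first 48 bytes of the register save area hold GP
  // registers, the next 128 bytes (8 x 16) hold SSE registers, and everything
  // else spills to the overflow area. The va_arg shadow TLS is laid out
  // identically, so va_arg can find an argument's shadow at the same offset
  // it finds the argument.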
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        // Fixed arguments passed through the overflow area will be stepped
        // over by va_start, so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue; // We have no space to copy the shadow to.
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) =
            MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                   kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory: {
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue; // We have no space to copy the shadow to.
          }
        }
        }
        // Take fixed arguments into account for GpOffset and FpOffset, but
        // don't actually store shadows for them.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
7785 void finalizeInstrumentation()
override {
7786 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
7787 "finalizeInstrumentation called twice");
7788 if (!VAStartInstrumentationList.
empty()) {
7795 ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
7796 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7802 Intrinsic::umin, CopySize,
7806 if (MS.TrackOrigins) {
7807 VAArgTLSOriginCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7816 for (CallInst *OrigInst : VAStartInstrumentationList) {
7817 NextNodeIRBuilder IRB(OrigInst);
7818 Value *VAListTag = OrigInst->getArgOperand(0);
7820 Value *RegSaveAreaPtrPtr =
7821 IRB.
CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
7823 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7825 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7826 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
7828 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
7830 if (MS.TrackOrigins)
7831 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
7832 Alignment, AMD64FpEndOffset);
7833 Value *OverflowArgAreaPtrPtr =
7834 IRB.
CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
7835 Value *OverflowArgAreaPtr =
7836 IRB.
CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
7837 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
7838 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
7839 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
7843 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
7845 if (MS.TrackOrigins) {
7848 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
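// Hedged reference sketch for the constants above (System V AMD64 ABI; this
// mirrors what the code assumes rather than quoting the ABI document). The
// byte offsets 8 and 16 read in finalizeInstrumentation() and the
// VAListTagSize of 24 passed to VarArgHelperBase correspond to:
//
//   struct AMD64VAListTag {    // byte offset
//     unsigned GpOffset;       //  0: next unused GP register slot, 0..48
//     unsigned FpOffset;       //  4: next unused FP register slot, 48..176
//     void *OverflowArgArea;   //  8: stack-passed (overflow) arguments
//     void *RegSaveArea;       // 16: 176-byte register save area
//   };                         // sizeof == 24
//
// The shadow TLS mirrors the register save area layout: 48 bytes for the six
// GP argument registers (6 x 8), then 128 bytes for the eight SSE registers
// (8 x 16), then the overflow area - hence AMD64GpEndOffset = 48 and
// AMD64FpEndOffsetSSE = 176 above.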
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of AArch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    return {AK_Memory, 0};
  }

  // The instrumentation stores the argument shadow in a non-ABI-specific
  // format because it does not know which argument is named: the first eight
  // GP registers are saved in the first 64 bytes of the va_arg TLS array,
  // followed by the eight FP/SIMD registers, and then the remaining
  // (overflow) arguments. Constant offsets within the TLS array allow a fast
  // copy in finalizeInstrumentation().
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory:
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
      }
      // Count Gr/Vr fixed arguments to their respective offsets, but don't
      // bother to actually store a shadow for them.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start, copy va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      // The variadic ABI for AArch64 creates two areas for the incoming
      // argument registers (one for the 64-bit general registers xn and one
      // for the 128-bit FP/SIMD registers qn). The shadow of all three areas
      // (GR, VR, stack) needs to be copied.
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);

      // Read the stack pointer from the va_list.
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      // Read both the __gr_top and __gr_off and add them up.
      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      // Read both the __vr_top and __vr_off and add them up.
      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // Since __gr_off is defined as '0 - ((8 - named_gr) * 8)', propagate
      // only the variadic arguments by skipping the bytes of shadow that
      // correspond to named arguments.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);

      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;

      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);

      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;

      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally for the remaining (stack-passed) arguments.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;

      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));

      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
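// Hedged reference sketch (AAPCS64; mirrors the offsets the code above
// assumes). getVAField64/getVAField32 read the five fields of the AArch64
// va_list, and VAListTagSize = 32 covers the whole struct:
//
//   struct AArch64VAList {   // byte offset
//     void *__stack;         //  0: stack (overflow) argument area
//     void *__gr_top;        //  8: end of the GP register save area
//     void *__vr_top;        // 16: end of the FP/SIMD register save area
//     int __gr_offs;         // 24: negative offset from __gr_top
//     int __vr_offs;         // 28: negative offset from __vr_top
//   };                       // sizeof == 32
//
// The TLS shadow is laid out as 64 bytes of GP shadow (8 x 8, x0-x7)
// followed by 128 bytes of FP/SIMD shadow (8 x 16, q0-q7) and then the
// overflow area - matching kAArch64GrArgSize = 64 and kAArch64VrArgSize =
// 128 above.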
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // For PowerPC, we need to deal with alignment of stack arguments -
    // they are mostly aligned to 8 bytes, but vectors and i128 arrays
    // are aligned to 16 bytes.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    // Parameter save area starts at 48 bytes from frame pointer for ABI
    // version 1, and 32 bytes for ABI version 2.
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < 8 to match the
          // placement of bits in big endian system.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member: it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      // On PPC64 va_list is a plain pointer into the parameter save area.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
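// Note (hedged): on PowerPC64, va_list is just a char* cursor into the
// caller's parameter save area, which is why VAListTagSize is 8 and why
// finalizeInstrumentation() loads the area pointer straight out of the tag.
// The 48- vs 32-byte VAArgBase above is the ELFv1 vs ELFv2 offset of the
// parameter save area within the caller's stack frame; subtracting it makes
// the shadow TLS layout start at offset 0. For example, a doubleword vararg
// at stack offset 56 under ELFv2 gets its shadow at TLS offset 56 - 32 = 24.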
/// PowerPC32-specific implementation of VarArgHelper.
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase = 0;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(
              IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          // Arrays are aligned to element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = ArgTy->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < IntptrSize to
          // match the placement of bits in big endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
      }
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member: it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;

      // In the PPC32 va_list tag, the overflow area pointer lives at byte
      // offset 4 and the register save area pointer at byte offset 8.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));

      // The register save area holds at most 8 GPR slots (32 bytes);
      // everything past that lives in the overflow area.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));

      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);

      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);

      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);

      // Mark the portion of the register save area that the TLS layout does
      // not model as fully initialized.
      RegSaveAreaShadowPtr = IRB.CreateInBoundsPtrAdd(
          RegSaveAreaShadowPtr, ConstantInt::get(MS.IntptrTy, 32));
      IRB.CreateMemSet(RegSaveAreaShadowPtr, IRB.getInt8(0),
                       ConstantInt::get(MS.IntptrTy, 32), Alignment);

      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);

      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);

      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);

      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
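// Hedged reference sketch (SVR4 PPC32 ABI; mirrors the offsets the code
// above assumes). The va_list tag fields read at offsets 4 (overflow area)
// and 8 (register save area), together with VAListTagSize = 12, correspond
// to:
//
//   struct PPC32VAListTag {    // byte offset
//     unsigned char gpr;       //  0: next GP argument register (r3..r10)
//     unsigned char fpr;       //  1: next FP argument register (f1..f8)
//     unsigned short reserved; //  2
//     void *overflow_arg_area; //  4: stack-passed arguments
//     void *reg_save_area;     //  8: saved r3..r10 (32 bytes), then FPRs
//   };                         // sizeof == 12
//
// Hence the umin(CopySize, 32) above: at most 8 x 4 bytes of GPR shadow can
// land in the register save area; everything past that is overflow shadow.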
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  // A very rough approximation of the SystemZ argument classification rules.
  ArgKind classifyArgument(Type *T) {
    // i128 and fp128 are passed indirectly.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // ABI says: "One of the simple integer types no more than 64 bits wide.
    // ... If such an argument is shorter than 64 bits, replace it by a full
    // 64-bit integer representing the same number, using sign or zero
    // extension". Shadow needs to be extended the same way.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt)) {
      assert(!CB.paramHasAttr(ArgNo, Attribute::SExt));
      return ShadowExtension::Zero;
    }
    if (CB.paramHasAttr(ArgNo, Attribute::SExt)) {
      assert(!CB.paramHasAttr(ArgNo, Attribute::ZExt));
      return ShadowExtension::Sign;
    }
    return ShadowExtension::None;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            // A short floating-point datum occupies only the leftmost bits
            // of a floating-point register, so don't extend the shadow and
            // don't mind the gap.
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex. No need to store shadow, since vector
        // varargs go through ArgKind::Memory.
        assert(IsFixed);
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset, but store shadow only for varargs.
        // Ignore fixed args, since we need to copy only the vararg portion
        // of the overflow area shadow.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // With soft floats, only the GP save area is live; otherwise copy the
    // whole 160-byte register save area shadow.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
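// Hedged reference sketch (s390x ELF ABI; mirrors the constants above).
// SystemZVAListTagSize = 32 and the two pointer offsets (16 and 24) used by
// copyOverflowArea()/copyRegSaveArea() correspond to:
//
//   struct SystemZVAList {       // byte offset
//     long __gpr;                //  0: count of GP register args consumed
//     long __fpr;                //  8: count of FP register args consumed
//     void *__overflow_arg_area; // 16: stack-passed arguments
//     void *__reg_save_area;     // 24: 160-byte register save area
//   };                           // sizeof == 32
//
// Within the 160-byte save area, the GP argument registers r2-r6 live at
// bytes 16..56 and the FP argument registers f0/f2/f4/f6 at bytes 128..160,
// matching SystemZGpOffset/SystemZGpEndOffset and
// SystemZFpOffset/SystemZFpEndOffset.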
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < IntptrSize to
          // match the placement of bits in big endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member: it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
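// Note (hedged): on i386 every variadic argument is passed on the stack and
// va_list is a single 4-byte pointer into it - hence VAListTagSize = 4 and a
// single memcpy of the whole CopySize in finalizeInstrumentation(). For
// example, for
//   printf("%d %lld", i, ll);
// the shadow TLS would hold 4 bytes for i at offset 0 and 8 bytes for ll at
// offset 4, mirroring the stack layout after the (fixed) format argument.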
/// Implementation of VarArgHelper that is used for ARM32, MIPS, RISC-V and
/// LoongArch64: va_list is a plain pointer, and all variadic arguments
/// occupy IntptrSize-aligned slots.
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjusting the shadow for argument with size < IntptrSize to match
        // the placement of bits in big endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member: it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};

// ARM32, LoongArch64, MIPS and RISC-V share the same vararg shadow layout;
// only the va_list tag size differs (passed to the constructor above).
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
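// Note (hedged): the generic scheme above assumes targets where va_list is a
// plain pointer and all variadic arguments occupy IntptrSize-aligned stack
// slots. The big-endian adjustment mirrors how sub-word values sit in the
// high-order bytes of their slot: e.g. on a big-endian 64-bit target, an i8
// argument occupies the last byte of its 8-byte slot, so its 1-byte shadow
// is written at slot offset +7 (IntptrSize - ArgSize = 8 - 1).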
/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);

  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);

  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);

  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);

  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);

  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);

  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);

  return new VarArgNoOpHelper(Func, Msan, Visitor);
}

} // namespace
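// Lifecycle sketch (hedged; inferred from the helper structs above, not a
// verbatim excerpt): the visitor drives the returned helper in two phases -
// per-call shadow spilling while walking the function body, then va_start
// fixup at the end:
//
//   VarArgHelper *VAH = CreateVarArgHelper(F, MS, Visitor);
//   // While visiting instructions:
//   //   each call site       -> VAH->visitCallBase(CB, IRB)
//   //   each @llvm.va_start  -> VAH->visitVAStartInst(I)
//   //   each @llvm.va_copy   -> VAH->visitVACopyInst(I)
//   // After the walk:
//   VAH->finalizeInstrumentation(); // copy the TLS backup into each va_list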
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (!CompileKernel && F.getName() == kMsanModuleCtorName)
    return false;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out memory attributes.
  AttributeMask B;
  B.addAttribute(Attribute::Memory).addAttribute(Attribute::NoSync);
  F.removeFnAttrs(B);

  return Visitor.runOnFunction();
}