#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"
219 "Controls which checks to insert");
222 "Controls which instruction to instrument");
241 "msan-track-origins",
246 cl::desc(
"keep going after reporting a UMR"),
255 "msan-poison-stack-with-call",
260 "msan-poison-stack-pattern",
261 cl::desc(
"poison uninitialized stack variables with the given pattern"),
266 cl::desc(
"Print name of local stack variable"),
271 cl::desc(
"Poison fully undef temporary values. "
272 "Partially undefined constant vectors "
273 "are unaffected by this flag (see "
274 "-msan-poison-undef-vectors)."),
278 "msan-poison-undef-vectors",
279 cl::desc(
"Precisely poison partially undefined constant vectors. "
280 "If false (legacy behavior), the entire vector is "
281 "considered fully initialized, which may lead to false "
282 "negatives. Fully undefined constant vectors are "
283 "unaffected by this flag (see -msan-poison-undef)."),
287 "msan-precise-disjoint-or",
288 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
289 "disjointedness is ignored (i.e., 1|1 is initialized)."),
294 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
299 cl::desc(
"exact handling of relational integer ICmp"),
303 "msan-handle-lifetime-intrinsics",
305 "when possible, poison scoped variables at the beginning of the scope "
306 "(slower, but more precise)"),
317 "msan-handle-asm-conservative",
328 "msan-check-access-address",
329 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
334 cl::desc(
"check arguments and return values at function call boundaries"),
338 "msan-dump-strict-instructions",
339 cl::desc(
"print out instructions with default strict semantics i.e.,"
340 "check that all the inputs are fully initialized, and mark "
341 "the output as fully initialized. These semantics are applied "
342 "to instructions that could not be handled explicitly nor "
351 "msan-dump-heuristic-instructions",
352 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
353 "Use -msan-dump-strict-instructions to print instructions that "
354 "could not be handled explicitly nor heuristically."),
358 "msan-instrumentation-with-call-threshold",
360 "If the function being instrumented requires more than "
361 "this number of checks and origin stores, use callbacks instead of "
362 "inline checks (-1 means never use callbacks)."),
367 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
377 cl::desc(
"Insert checks for constant shadow values"),
384 cl::desc(
"Place MSan constructors in comdat sections"),
390 cl::desc(
"Define custom MSan AndMask"),
394 cl::desc(
"Define custom MSan XorMask"),
398 cl::desc(
"Define custom MSan ShadowBase"),
402 cl::desc(
"Define custom MSan OriginBase"),
407 cl::desc(
"Define threshold for number of checks per "
408 "debug location to force origin update."),
struct MemoryMapParams {

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
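
// A rough sketch of the address mapping these parameters drive (see
// getShadowPtrOffset() and getShadowOriginPtrUserspace() below):
//   Offset = (Addr & ~AndMask) ^ XorMask
//   Shadow = Offset + ShadowBase
//   Origin = (Offset + OriginBase) aligned down to the origin slot size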
class MemorySanitizer {
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;
  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
  Value *ParamOriginTLS;
  Value *RetvalOriginTLS;
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;
  FunctionCallee MaybeWarningVarSizeFn;
  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
  FunctionCallee MsanPoisonStackFn;
  FunctionCallee MsanChainOriginFn;
  FunctionCallee MsanSetOriginFn;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;
  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;
  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;
  MemoryMapParams CustomMapParams;

  MDNode *ColdCallWeights;
  MDNode *OriginStoreWeights;
void insertModuleCtor(Module &M) {

  if (!Options.Kernel) {

  MemorySanitizer Msan(*F.getParent(), Options);

      OS, MapClassName2PassName);

  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;
template <typename... ArgsTy>
FunctionCallee
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,

                                 std::forward<ArgsTy>(Args)...);

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
  RetvalOriginTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
  }

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);

  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
  return M.getOrInsertGlobal(Name, Ty, [&] {
                              nullptr, Name, nullptr,
void MemorySanitizer::createUserspaceApi(Module &M,

  StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                    : "__msan_warning_with_origin_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName,
                                    IRB.getVoidTy(), IRB.getInt32Ty());

        Recover ? "__msan_warning" : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

                                    IRB.getIntPtrTy(M.getDataLayout()));

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
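
  // A sketch of what these callbacks presumably do on the runtime side (not
  // part of this file): __msan_maybe_warning_<N> checks an N-byte shadow
  // value and reports with the given origin, roughly
  //   if (s != 0) __msan_warning_with_origin(o);
  // It is used in place of inline checks once a function crosses
  // -msan-instrumentation-with-call-threshold.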
void MemorySanitizer::initializeCallbacks(Module &M,

  if (CallbacksInitialized)

  MsanChainOriginFn = M.getOrInsertFunction("__msan_chain_origin",

  MsanSetOriginFn = M.getOrInsertFunction(
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset",
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

    createKernelApi(M, TLI);
    createUserspaceApi(M, TLI);

  CallbacksInitialized = true;

      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;

  if (ShadowPassed || OriginPassed) {
    MapParams = &CustomMapParams;

  switch (TargetTriple.getOS()) {
    switch (TargetTriple.getArch()) {
    switch (TargetTriple.getArch()) {
    switch (TargetTriple.getArch()) {

  C = &(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();

  if (!CompileKernel) {
    M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(
          M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
          IRB.getInt32(TrackOrigins), "__msan_track_origins");

    M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                GlobalValue::WeakODRLinkage,
                                IRB.getInt32(Recover), "__msan_keep_going");
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;

  virtual void visitVAStartInst(VAStartInst &I) = 0;

  virtual void visitVACopyInst(VACopyInst &I) = 0;

  virtual void finalizeInstrumentation() = 0;

struct MemorySanitizerVisitor;

                    MemorySanitizerVisitor &Visitor);

  if (TypeSizeFixed <= 8)
class NextNodeIRBuilder : public IRBuilder<> {

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  MemorySanitizer &MS;
  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;

  bool PropagateShadow;
  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}

  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallSetVector<AllocaInst *, 16> AllocaSet;

  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)

    bool SanitizeFunction =
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);

    IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
        .CreateIntrinsic(Intrinsic::donothing, {});
    if (MS.CompileKernel) {
      insertKmsanPrologue(IRB);

               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");

  bool instrumentWithCalls(Value *V) {
      ++SplittableBlocksCount;

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&

    if (MS.TrackOrigins <= 1)
    return IRB.CreateCall(MS.MsanChainOriginFn, V);

    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        CurrentAlignment = IntptrAlignment;
                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,

    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    if (instrumentWithCalls(ConvertedShadow) &&
      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =
      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
  void materializeStores() {
    for (StoreInst *SI : StoreList) {
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      const Align Alignment = SI->getAlign();
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      [[maybe_unused]] StoreInst *NewSI =

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,

    if (MS.TrackOrigins < 2)

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

      auto NewDebugLoc = OI->getDebugLoc();
      IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
      Origin = updateOrigin(Origin, IRBOrigin);
    if (MS.CompileKernel || MS.TrackOrigins)

    const DataLayout &DL = F.getDataLayout();
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
        ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
        Value *ConvertedShadow2 =
        FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
        FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
        unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());
            {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
          !MS.Recover, MS.ColdCallWeights);
      insertWarningFn(IRB, Origin);
  void materializeInstructionChecks(
    const DataLayout &DL = F.getDataLayout();
    bool Combine = !MS.TrackOrigins;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);
      Value *ConvertedShadow = ShadowData.Shadow;
          insertWarningFn(IRB, ShadowData.Origin);
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        Shadow = ConvertedShadow;
        Shadow = convertToBool(Shadow, IRB, "_mscmp");
        ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
        Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
      materializeOneCheck(IRB, Shadow, nullptr);
  static bool isAArch64SVCount(Type *Ty) {
    return TTy->getName() == "aarch64.svcount";

  static bool isScalableNonVectorType(Type *Ty) {
    if (!isAArch64SVCount(Ty))
      LLVM_DEBUG(dbgs() << "isScalableNonVectorType: Unexpected type " << *Ty
  void materializeChecks() {
    SmallPtrSet<Instruction *, 16> Done;

    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");

      MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
    for (Instruction *I : Instructions)

    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));

      VAHelper->finalizeInstrumentation();

    for (auto Item : LifetimeStartList) {
      instrumentAlloca(*Item.second, Item.first);
      AllocaSet.remove(Item.second);

    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    materializeChecks();

    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

    const DataLayout &DL = F.getDataLayout();
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
                             VT->getElementCount());
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
    if (isScalableNonVectorType(OrigTy)) {
      LLVM_DEBUG(dbgs() << "getShadowTy: Scalable non-vector type: " << *OrigTy
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
  Value *collapseStructShadow(StructType *Struct, Value *Shadow,
    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
      Value *ShadowBool = convertToBool(ShadowItem, IRB);
      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
        Aggregator = ShadowBool;

  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
    if (!Array->getNumElements())
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);

      return collapseStructShadow(Struct, V, IRB);
      return collapseArrayShadow(Array, V, IRB);
        V->getType()->getPrimitiveSizeInBits().getFixedValue();

    Type *VTy = V->getType();
    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    assert(IntPtrTy == MS.IntptrTy);

          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C, false,

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));

    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
  std::pair<Value *, Value *>
                               MaybeAlign Alignment) {
      assert(VectTy->getElementType()->isPointerTy());
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    return std::make_pair(ShadowPtr, OriginPtr);
  template <typename... ArgsTy>
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);

    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);

      FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,

    return std::make_pair(ShadowPtr, OriginPtr);
  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    Value *ShadowPtrs = ConstantInt::getNullValue(
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
    for (unsigned i = 0; i < NumElements; ++i) {
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    return {ShadowPtrs, OriginPtrs};

  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 MaybeAlign Alignment,
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
                         ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");

    if (!MS.TrackOrigins)
                         ConstantInt::get(MS.IntptrTy, ArgOffset),

  Value *getOriginPtrForRetval() {
    return MS.RetvalOriginTLS;

    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);

    if (!MS.TrackOrigins)
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
    Type *ShadowTy = getShadowTy(OrigTy);

  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));

    Type *ShadowTy = getShadowTy(V);
    return getPoisonedShadow(ShadowTy);
      if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanShadow(V);
      Value *Shadow = ShadowMap[V];
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
      assert(Shadow && "No shadow for a value");

      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);

      Value *&ShadowPtr = ShadowMap[V];
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
                          ? "vscale not fully supported\n"
                          : "Arg is not sized\n"));
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());

        unsigned Size = FArg.hasByValAttr()
                            ? DL.getTypeAllocSize(FArg.getParamByValType())
                            : DL.getTypeAllocSize(FArg.getType());

        if (FArg.hasByValAttr()) {
          const Align ArgAlign = DL.getValueOrABITypeAlignment(
              FArg.getParamAlign(), FArg.getParamByValType());
          Value *CpShadowPtr, *CpOriginPtr;
          std::tie(CpShadowPtr, CpOriginPtr) =
              getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
          if (!PropagateShadow || Overflow) {
            EntryIRB.CreateMemSet(
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                CpShadowPtr, CopyAlign, Base, CopyAlign, Size);

          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
            EntryIRB.CreateMemCpy(

        if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
            (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
          ShadowPtr = getCleanShadow(V);
          setOrigin(A, getCleanOrigin());
          Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
          ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
                   << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");

      assert(ShadowPtr && "Could not find shadow for an argument");
    if (cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
        PoisonUndefVectors) {
      for (unsigned i = 0; i != NumElems; ++i) {
                                     : getCleanShadow(Elem);

      LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
                        << *ShadowConstant << "\n");
      return ShadowConstant;

    return getCleanShadow(V);

  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
    if (!MS.TrackOrigins)
      return getCleanOrigin();
           "Unexpected value type in getOrigin()");
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");

  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");
    if (isScalableNonVectorType(ShadowTy)) {
      LLVM_DEBUG(dbgs() << "Skipping check of scalable non-vector " << *Shadow
                        << " before " << *OrigIns << "\n");
           "Can only insert checks for integer, vector, and aggregate shadow "
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));

  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
    Value *Shadow, *Origin;
      Shadow = getShadow(Val);
      Origin = getOrigin(Val);
    insertCheckShadow(Shadow, Origin, OrigIns);
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
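
  // Note: both tables strengthen the ordering rather than weaken it
  // (e.g. relaxed becomes release for stores and acquire for loads),
  // presumably so the shadow accesses added around an atomic operation stay
  // ordered with respect to the application's own accesses.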
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))

    if (isInPrologue(I))

      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertCheckShadowOf(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        setOrigin(&I, getCleanOrigin());
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
      insertCheckShadowOf(I.getPointerOperand(), &I);

  void handleCASOrRMW(Instruction &I) {
    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),

    insertCheckShadowOf(Addr, &I);

      insertCheckShadowOf(Val, &I);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  void visitAtomicRMWInst(AtomicRMWInst &I) {

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {

  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    setOrigin(&I, getOrigin(&I, 0));

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);
  void visitSExtInst(SExtInst &I) {
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitZExtInst(ZExtInst &I) {
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitTruncInst(TruncInst &I) {
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitBitCastInst(BitCastInst &I) {
      if (CI->isMustTailCall())
    setOrigin(&I, getOrigin(&I, 0));

  void visitPtrToIntInst(PtrToIntInst &I) {
                                 "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitIntToPtrInst(IntToPtrInst &I) {
                                 "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
    return IRB.CreateOr({S1S2, V1S2, S1V2});

  void visitAnd(BinaryOperator &I) {
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *S2 = getShadow(&I, 1);

    Value *OutShadow = handleBitwiseAnd(IRB, V1, V2, S1, S2);
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
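
  // Propagation rule sketch: an AND output bit is known-0 whenever either
  // operand bit is a known 0, so the shadow is
  //   OutShadow = (S1 & S2) | (V1 & S2) | (S1 & V2)
  // i.e. poisoned where both inputs are poisoned, or where one is poisoned
  // and the other's concrete bit is 1.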
  void visitOr(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);

      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");

    setOriginForNaryOp(I);
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

      if (CombineShadow) {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");

      if (MSV->MS.TrackOrigins) {
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *Cond = MSV->convertToBool(OpShadow, IRB);

      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);

    void Done(Instruction *I) {
      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      if (MSV->MS.TrackOrigins) {
        MSV->setOrigin(I, Origin);

    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
           "Vector of pointers is not a valid shadow type");

    Type *srcTy = V->getType();
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)

    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
    if (V->getType()->isPtrOrPtrVectorTy())

  void handleShadowOr(Instruction &I) {
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
                          unsigned Shards, Value *VectorA, Value *VectorB) {
    [[maybe_unused]] unsigned TotalNumElems = NumElems;
    assert(NumElems % (ReductionFactor * Shards) == 0);

    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;

      for (unsigned j = 0; j < Shards; j++) {
        unsigned Offset = NumElems / Shards * j;

        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor)

        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor)
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);
    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =
    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, 2, Shards,
                                       FirstArgShadow, SecondArgShadow);
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);
    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =
    [[maybe_unused]] FixedVectorType *ReturnType =

    FixedVectorType *ReinterpretShadowTy = nullptr;

    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);

    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);

    Value *OrShadow = horizontalReduce(I, 2, Shards,
                                       FirstArgShadow, SecondArgShadow);
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }

  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
      Type *EltTy = VTy->getElementType();
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
          Elements.push_back(ConstantInt::get(EltTy, 1));
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
        ShadowMul = ConstantInt::get(Ty, 1);

              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  void visitMul(BinaryOperator &I) {
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void handleIntegerDiv(Instruction &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
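
  // Note: integer division checks the divisor eagerly (its shadow must be
  // clean, since an undefined divisor can change control flow by trapping)
  // and then reuses the dividend's shadow for the result.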
  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
  void handleEqualityComparison(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    setOriginForNaryOp(I);

  void handleRelationalComparisonExact(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();

    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      return std::make_pair(Min, Max);

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);

    setOriginForNaryOp(I);
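
  // Sketch of the idea: with Sa as "unknown bits", any concrete A lies in
  // the unsigned range [A & ~Sa, A | Sa]. The comparison is well defined
  // exactly when the [Amin, Amax] and [Bmin, Bmax] ranges give the same
  // answer at both extremes; signed compares are first reduced to unsigned
  // ones by XOR-ing both sides with the sign bit (MinVal above).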
  void handleSignedRelationalComparison(ICmpInst &I) {
      op = I.getOperand(0);
      pre = I.getPredicate();
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(op));
  void visitICmpInst(ICmpInst &I) {
    if (I.isEquality()) {
      handleEqualityComparison(I);
      handleRelationalComparisonExact(I);
      handleSignedRelationalComparison(I);
      handleRelationalComparisonExact(I);

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
  void handleShift(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V2 = I.getOperand(1);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
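
  // Shift rule sketch: the first operand's shadow is shifted by the same
  // (concrete) amount, and if the shift amount itself is not fully
  // initialized, S2Conv widens that to an all-ones shadow so the entire
  // result is poisoned.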
  void handleFunnelShift(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 2);
    Value *V2 = I.getOperand(2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1));
        {I.getArgOperand(0), I.getArgOperand(1),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1));
        {I.getArgOperand(0), I.getArgOperand(1),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitMemSetInst(MemSetInst &I) {
        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins)

  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
        setOrigin(&I, getCleanOrigin());
  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();
    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();

    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      return handleVectorStoreIntrinsic(I);

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      return handleVectorLoadIntrinsic(I);

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))

  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

  void handleLifetimeStart(IntrinsicInst &I) {
    LifetimeStartList.push_back(std::make_pair(&I, AI));

  void handleBswap(IntrinsicInst &I) {
    Type *OpType = Op->getType();
    setOrigin(&I, getOrigin(Op));
  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);
        I.getType(), I.getIntrinsicID(), {Src, False});
        I.getType(), I.getIntrinsicID(), {SrcShadow, False});
        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    Value *NotAllZeroShadow =
    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");
      OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");
    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);

  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {

  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =
    unsigned FullShadowNumElems =
    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());

    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);
    Value *S0 = getShadow(&I, 0);
    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);

    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);
  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      ConvertOp = I.getArgOperand(0);

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      AggShadow = ConvertShadow;

    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

      Value *ResultShadow = getShadow(CopyOp);
      for (int i = 0; i < NumUsedElements; ++i) {
            ResultShadow, ConstantInt::getNullValue(EltTy),
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), true);
    return CreateShadowCast(IRB, S2, T, true);

    return CreateShadowCast(IRB, S2, T, true);
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    Value *S2 = getShadow(&I, 1);
                         : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
                      {IRB.CreateBitCast(S1, V1->getType()), V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
                                 X86_MMXSizeInBits / EltSizeInBits);
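
  // For example, getMMXVectorTy(16) yields <4 x i16>: a 64-bit MMX value
  // viewed as packed 16-bit elements.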
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
    if (MMXEltSizeInBits) {
                                {S1_ext, S2_ext}, nullptr,
                                "_msprop_vector_pack");
    if (MMXEltSizeInBits)
    setOriginForNaryOp(I);
  Constant *createDppMask(unsigned Width, unsigned Mask) {
    SmallVector<Constant *, 4> R(Width);

    const unsigned Width =
    Value *DstMaskV = createDppMask(Width, DstMask);

  void handleDppIntrinsic(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);

    const unsigned Width =
    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
        SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    setOriginForNaryOp(I);
    C = CreateAppToShadowCast(IRB, C);

  void handleBlendvIntrinsic(IntrinsicInst &I) {
    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;

    C = convertBlendvToSelectMask(IRB, C);
    Sc = convertBlendvToSelectMask(IRB, Sc);

    handleSelectLikeInst(I, C, T, F);
  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    unsigned ZeroBitsPerResultElement =

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    setOriginForNaryOp(I);
  void handleVectorPmaddIntrinsic(IntrinsicInst &I, unsigned ReductionFactor,
                                  unsigned EltSizeInBits = 0) {
    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      Va = I.getOperand(0);
      Vb = I.getOperand(1);

      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {
      Va = I.getOperand(1);
      Vb = I.getOperand(2);

      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);

    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =
      assert(AccumulatorType == ReturnType);

    FixedVectorType *ImplicitReturnType =
    if (EltSizeInBits) {
          getMMXVectorTy(EltSizeInBits * ReductionFactor,
          ReturnType->getNumElements() * ReductionFactor);

    VaInt = CreateAppToShadowCast(IRB, Va);
    VbInt = CreateAppToShadowCast(IRB, Vb);

    And = handleBitwiseAnd(IRB, VaNonZero, VbNonZero, SaNonZero, SbNonZero);
                           ImplicitReturnType);

    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setOriginForNaryOp(I);
  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));
    setOriginForNaryOp(I);

  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
    Value *Shadow0 = getShadow(&I, 0);
    setOriginForNaryOp(I);

  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));

  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));
  void handleStmxcsr(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;

      insertCheckShadowOf(Addr, &I);

  void handleLdmxcsr(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);

      insertCheckShadowOf(Addr, &I);

    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
    insertCheckShadow(Shadow, Origin, &I);
  void handleMaskedExpandLoad(IntrinsicInst &I) {
    Value *Ptr = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(0);
    Value *PassThru = I.getArgOperand(2);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, false);

                              getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());
  void handleMaskedCompressStore(IntrinsicInst &I) {
    Value *Values = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    MaybeAlign Align = I.getParamAlign(1);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, true);
  void handleMaskedGather(IntrinsicInst &I) {
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *PassThru = I.getArgOperand(2);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertCheckShadowOf(Mask, &I);
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);

                                getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());
  void handleMaskedScatter(IntrinsicInst &I) {
    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertCheckShadowOf(Mask, &I);
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, true);
  void handleMaskedStore(IntrinsicInst &I) {
    Value *V = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    Value *Shadow = getShadow(V);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, true);

    if (!MS.TrackOrigins)

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,
4365 void handleMaskedLoad(IntrinsicInst &
I) {
4367 Value *Ptr =
I.getArgOperand(0);
4368 const Align Alignment =
I.getParamAlign(0).valueOrOne();
4370 Value *PassThru =
I.getArgOperand(2);
4373 insertCheckShadowOf(Ptr, &
I);
4374 insertCheckShadowOf(Mask, &
I);
4377 if (!PropagateShadow) {
4378 setShadow(&
I, getCleanShadow(&
I));
4379 setOrigin(&
I, getCleanOrigin());
4383 Type *ShadowTy = getShadowTy(&
I);
4384 Value *ShadowPtr, *OriginPtr;
4385 std::tie(ShadowPtr, OriginPtr) =
4386 getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment,
false);
4388 getShadow(PassThru),
"_msmaskedld"));
4390 if (!MS.TrackOrigins)
4397 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
4402 setOrigin(&
I, Origin);
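  // Origin tracking for masked loads is approximate: origins have 4-byte
  // granularity while the mask is per element, so the code above keeps the
  // pass-through origin only if the pass-through shadow actually contributes
  // (some masked-off lane is poisoned); otherwise it uses the origin loaded
  // from shadow memory.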
  // e.g., void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32> mask,
  //                                           <8 x float> src)
  // (AVX512 masked stores are lowered to llvm.masked.store and handled
  // elsewhere.)
  void handleAVXMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    assert(isa<VectorType>(Mask->getType()) && "Mask is not a vector!");

    Value *Src = I.getArgOperand(2);
    assert(isa<VectorType>(Src->getType()) && "Source is not a vector!");

    const Align Alignment = Align(1);

    Value *SrcShadow = getShadow(Src);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Dst, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    // The intrinsic may require floating-point, but shadows can be arbitrary
    // bit patterns; we rely on the intrinsic copying them unchanged.
    ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (!MS.TrackOrigins)
      return;

    // Approximation only: paint the whole destination with Src's origin.
    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }

  // e.g., <8 x float> @llvm.x86.avx.maskload.ps.256(ptr src, <8 x i32> mask)
  // Masked-off elements are zeroed, and zero conveniently also represents
  // initialized shadow.
  void handleAVXMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    assert(isa<VectorType>(Mask->getType()) && "Mask is not a vector!");

    const Align Alignment = Align(1);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Mask, &I);

    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore*/ false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
    // The intrinsic may require floating-point, but shadows are integers.
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));

    if (!MS.TrackOrigins)
      return;

    // The "pass-through" value is always zero (initialized), so it is correct
    // to simply copy the origin from the source.
    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  // Test whether the index operand of an AVX permutation is initialized,
  // ignoring the index bits that the hardware itself ignores.
  void maskedCheckAVXIndexShadow(IRBuilder<> &IRB, Value *Idx,
                                 Instruction *I) {
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize =
        cast<FixedVectorType>(Idx->getType())->getNumElements();
    assert(isPowerOf2_64(IdxVectorSize));
    // Only the low log2(IdxVectorSize) bits of each index are meaningful.
    auto *IdxShadow = getShadow(Idx);
    Value *Truncated = IRB.CreateAnd(
        IdxShadow, ConstantInt::get(IdxShadow->getType(), IdxVectorSize - 1),
        "_mstrunc");
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }

  // Instrument AVX permutation intrinsics: apply the same permutation
  // (argument 1) to the shadow.
  void handleAVXVpermilvar(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);

    // Shadows are integer-ish types, but some intrinsics require a
    // different (e.g., floating-point) type.
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {Shadow, I.getArgOperand(1)});

    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }

  // Instrument AVX512 two-source permutation intrinsics: apply the same
  // permutation (argument 1) to both shadows.
  void handleAVXVpermi2var(IntrinsicInst &I) {
    assert(I.arg_size() == 3);
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    IRBuilder<> IRB(&I);
    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);

    // Shadows are integer-ish types, but some intrinsics require a
    // different (e.g., floating-point) type.
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {AShadow, Idx, BShadow});

    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
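  // The permutation handlers above share one trick: running the very same
  // permutation on the shadows makes each output lane inherit exactly the
  // shadow of the input lane it was taken from. The index vector itself is
  // checked via maskedCheckAVXIndexShadow, which masks off the index bits
  // the hardware ignores, e.g. for 8 lanes only the low 3 bits matter:
  //
  //   %trunc = and <8 x i32> %idxshadow, splat(i32 7)   ; 8 - 1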
  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }
  // Instrument AVX512 masked FP-to-int conversions. LastMask selects where
  // the writethrough/mask operands sit in the argument list.
  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 4);
    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }

    assert(isFixedFPVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    assert(Mask->getType()->isIntegerTy());
    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);

    assert(WriteThruNumElements == MaskNumElements);

    insertCheckShadowOf(Mask, &I);

    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // Zero out the upper mask bits so that the select below takes the
      // zeroed shadow rather than the writethrough shadow for those lanes.
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, MaskNumElements));
    }

    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), MaskNumElements),
        "_ms_mask_bitcast");

    // A converted lane is fully poisoned if any bit of the source lane is
    // poisoned; conversions do not preserve shadow bit-for-bit.
    AShadow = IRB.CreateSExt(
        IRB.CreateICmpNE(AShadow, getCleanShadow(AShadow)),
        getShadowTy(&I), "_ms_a_shadow");

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow,
                                     "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
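  // The handler above models AVX512 masked conversion semantics with a single
  // select over shadows, roughly:
  //
  //   shadow(out) = select %mask, shadow(convert(a)), shadow(writethru)
  //
  // where shadow(convert(a)) is all-ones for any lane whose source shadow is
  // nonzero, since one poisoned input bit can affect every output bit.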
  // Instrument BMI / BMI2 intrinsics.
  // All of these are Z = I(X, Y), where operand and result types match and
  // are either i32 or i64. The following works for all of them:
  //   Sz = I(Sx, Y) | (sext (Sy != 0))
  void handleBmiIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);

    // If any bit of the mask operand is poisoned, the whole result is.
    Value *SMask = getShadow(&I, 1);
    SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
                           ShadowTy);
    // Apply the same intrinsic to the shadow of the first operand.
    Value *S = IRB.CreateCall(I.getCalledFunction(),
                              {getShadow(&I, 0), I.getOperand(1)});
    S = IRB.CreateOr(SMask, S);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      Mask.append(2, X);
    }
    return Mask;
  }

  // Instrument pclmul intrinsics.
  // These operate either on odd or on even elements of the inputs, depending
  // on the constant 3rd argument (imm8). Replicate the used elements over the
  // unused ones, then OR the shadows of both inputs.
  void handlePclmulIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    SOC.Done(&I);
  }
  // Instrument _mm_something_sd|ss intrinsics with one data operand.
  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    // Upper elements of the first operand, lowest element of the second.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleVtestIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *Or = IRB.CreateOr(Shadow0, Shadow1);
    Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
    Value *Scalar = convertShadowToScalar(NZ, IRB);
    Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // Instrument _mm_something_sd|ss intrinsics with two data operands.
  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    Value *OrShadow = IRB.CreateOr(First, Second);
    // Upper elements of the first operand, lowest elements of both OR'd.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // Instrument x86 SSE/AVX packed rounding intrinsics. The rounding-mode
  // argument is a constant, so shadow propagation reduces to the operand's
  // shadow.
  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    assert(I.arg_size() == 2);
    assert(isa<ConstantInt>(I.getArgOperand(1)));

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    SC.Done(&I);
  }
  void handleAbsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());
    assert(isa<ConstantInt>(IsIntMinPoison));

    // If IsIntMinPoison is set, an INT_MIN input yields a poisoned result.
    Value *SrcShadow = getShadow(Src);
    APInt MinVal =
        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
    Value *SrcIsMin = IRB.CreateICmpEQ(Src, MinValVec);

    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow =
        IRB.CreateSelect(SrcIsMin, PoisonedShadow, SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleIsFpClass(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));

    Value *Shadow = PoisonValue::get(getShadowTy(&I));
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
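  // The {s,u}{add,sub,mul}.with.overflow intrinsics return
  // {result, overflow-bit}. The handler above gives the result element the OR
  // of the operand shadows, and poisons the overflow bit whenever any operand
  // bit is poisoned, since a single poisoned bit can flip whether the
  // operation overflows.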
    Value *Shadow = getShadow(V);
  // Handle llvm.x86.avx512.mask.pmov{s,us}.*.512 variants that have no
  // non-saturating pmov counterpart, e.g.:
  //   <16 x i32> @llvm.x86.avx512.mask.pmovs.qd.512
  //                  (<8 x i64> a, <16 x i32> writethru, i8 mask)
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->isIntegerTy());
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);

    if (ANumElements != OutputNumElements) {
      // Zero-extend the mask so that the select below writes zeroed (clean)
      // shadow into the unused upper half of the output.
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
                            "_ms_mask_zext");
    }
    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements),
        "_ms_mask_bitcast");

    Value *AShadow = getShadow(A);
    // The return type may have fewer, equally-sized elements than A.
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);
    // We approximate the saturating conversion with truncation; saturation of
    // a fully-initialized lane cannot itself create uninitialized bits.
    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow =
        IRB.CreateSelect(Mask, AShadow, WriteThroughShadow, "_ms_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Handle AVX512 masked FP intrinsics of the shape
  //   <W x FP> @llvm.x86...(..., <W x FP> a, ..., <W x FP> writethru,
  //                         ..., iK mask, ...)
  // where every operand other than A and WriteThru must be a fully
  // initialized scalar, and the mask selects per element between the op's
  // result and the writethrough value.
  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
                                         unsigned WriteThruIndex,
                                         unsigned MaskIndex) {
    IRBuilder<> IRB(&I);

    unsigned NumArgs = I.arg_size();
    assert(AIndex < NumArgs);
    assert(WriteThruIndex < NumArgs);
    assert(MaskIndex < NumArgs);
    assert(AIndex != WriteThruIndex);
    assert(AIndex != MaskIndex);
    assert(WriteThruIndex != MaskIndex);

    Value *A = I.getOperand(AIndex);
    Value *WriteThru = I.getOperand(WriteThruIndex);
    Value *Mask = I.getOperand(MaskIndex);

    assert(isFixedFPVector(A));
    assert(isFixedFPVector(WriteThru));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThru->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    // All the other operands must be fully-initialized scalars.
    for (unsigned i = 0; i < NumArgs; ++i) {
      if (i != AIndex && i != WriteThruIndex) {
        assert(I.getOperand(i)->getType()->isIntegerTy());
        insertCheckShadowOf(I.getOperand(i), &I);
      }
    }

    // The mask has one bit per element of A, but at least 8 bits.
    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));

    Value *AShadow = getShadow(A);
    Value *WriteThruShadow = getShadow(WriteThru);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThruShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
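  // Operand layout assumed by the callers below (the rsqrt14/rcp14/rndscale
  // families), expressed via the AIndex/WriteThruIndex/MaskIndex parameters:
  //
  //   <16 x float> @llvm.x86.avx512.rsqrt14.ps.512
  //       (<16 x float> a, <16 x float> writethru, i16 mask)
  //
  // is dispatched as handleAVX512VectorGenericMaskedFP(I, 0, 1, 2), while
  // rndscale places an immediate between a and writethru, hence (I, 0, 2, 3).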
  // For the sh.* AVX512 FP16 compiler intrinsics:
  //   <8 x half> @llvm.x86.avx512fp16.mask.{add,sub,mul,div,max,min}.sh.round
  //       (<8 x half> a, <8 x half> b, <8 x half> writethru, i8 mask,
  //        i32 rounding)
  // The op applies to the lowest elements of A and B; the remaining elements
  // are copied from A, and the mask selects between the computed lowest
  // element and WriteThrough's.
  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 5);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);

    // Only the lowest mask bit matters, but checking the whole mask is
    // cheaper and uninitialized upper bits are suspicious anyway.
    insertCheckShadowOf(Mask, &I);

    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    assert(WriteThrough->getType() == A->getType());
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);

    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);

    Value *MaskLower = IRB.CreateTrunc(Mask, IRB.getInt1Ty());

    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
        "_msprop");

    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }
  // Instrument GF2P8AFFINEQB, out = (A matmul X) xor B over GF(2):
  //   <W x i8> @llvm.x86.vgf2p8affineqb.*(<W x i8> x, <W x i8> a, i8 b)
  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *X = I.getOperand(0);
    Value *A = I.getOperand(1);
    Value *B = I.getOperand(2);

    assert(isFixedIntVector(X));
    assert(cast<VectorType>(X->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);
    assert(A->getType() == X->getType());
    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);
    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    CallInst *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    CallInst *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {X, AShadow, BZeroShadow});
    CallInst *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {XShadow, A, BZeroShadow});

    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    // There is no LLVM IR intrinsic for _mm512_set1_epi8, so splat manually.
    unsigned NumElements =
        cast<FixedVectorType>(I.getType())->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }
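  // Since out = (A matmul X) xor B is bilinear over GF(2), a poisoned bit in
  // X can taint an output bit only where A routes it, and vice versa. The
  // three intrinsic calls above compute exactly that routing: shadow-through-
  // data for each operand plus the fully-shadowed cross term, OR'd together
  // with the broadcast shadow of the immediate B.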
  // Instrument AArch64 NEON vector load intrinsics: ld1x{2,3,4},
  // ld{2,3,4}, ld{2,3,4}r and, with WithLane set, ld{2,3,4}lane.
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();

    // The return type is a struct of vectors.
    assert(I.getType()->isStructTy());

    IRBuilder<> IRB(&I);
    SmallVector<Value *, 6> ShadowArgs;

    if (WithLane) {
      // 2, 3 or 4 vectors, plus the lane number and the source pointer.
      assert(4 <= numArgs && numArgs <= 6);

      // Pass-through vectors: apply the intrinsic to their shadows.
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      ShadowArgs.push_back(LaneNumber);

      // An uninitialized lane number is an error in its own right.
      insertCheckShadowOf(LaneNumber, &I);
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore*/ false);
    ShadowArgs.push_back(SrcShadowPtr);

    CallInst *CI =
        IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, CI);

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  /// Handle Arm NEON vector store intrinsics (vst{2,3,4}, vst1x_{2,3,4} and,
  /// with useLane set, vst{2,3,4}lane).
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    IRBuilder<> IRB(&I);

    // Don't use getNumOperands() because it includes the callee.
    int numArgOperands = I.arg_size();

    // The last argument is the output pointer.
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    assert(Addr->getType()->isPointerTy());
    int skipTrailingOperands = 1;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // The second-to-last argument is the lane number (for vst{2,3,4}lane).
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      assert(isa<IntegerType>(
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
    }

    SmallVector<Value *, 8> ShadowArgs;
    // All the initial operands are the inputs.
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }

    // The memory written by the intrinsic covers all the input vectors, so
    // the shadow written by the shadow intrinsic is one flat vector with
    // numVectors * elements lanes.
    FixedVectorType *OutputVectorTy = FixedVectorType::get(
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
        cast<FixedVectorType>(I.getArgOperand(0)->getType())
                ->getNumElements() *
            (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(
          1, I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // AArch64 NEON does not need alignment (unless OS requires it).
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
    ShadowArgs.append(1, OutputShadowPtr);

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (MS.TrackOrigins) {
      // TODO: if we modelled the vst* instruction more precisely, we could
      // more accurately track the origins (e.g., if both inputs are
      // uninitialized, take the origin of the first one).
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }
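  // For the interleaving stores (st2/st3/st4), running the same intrinsic on
  // the shadows writes an interleaved shadow of all the input vectors, which
  // is byte-for-byte what the hardware writes to application memory, so the
  // shadow of the stored region stays exact; only origins are approximated
  // (one combined origin for the whole region).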
  // Instrument NEON matrix multiply-accumulate intrinsics
  // (smmla/ummla/usmmla), which compute R += A (ARows x ACols) *
  // B (BRows x BCols).
  void handleNEONMatrixMultiply(IntrinsicInst &I, unsigned int ARows,
                                unsigned int ACols, unsigned int BRows,
                                unsigned int BCols) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *R = I.getArgOperand(0);
    Value *A = I.getArgOperand(1);
    Value *B = I.getArgOperand(2);

    assert(I.getType() == R->getType());

    Value *ShadowR = getShadow(&I, 0);
    Value *ShadowA = getShadow(&I, 1);
    Value *ShadowB = getShadow(&I, 2);

    // Approximation: run the same multiply-accumulate on the operand shadows,
    // starting from a clean accumulator, so that each output lane is tainted
    // by the operand lanes that feed it; then OR in the accumulator's shadow.
    Value *ShadowAB = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {getCleanShadow(R), ShadowA, ShadowB});

    setShadow(&I, IRB.CreateOr(ShadowAB, ShadowR));
    setOriginForNaryOp(I);
  }
  /// Handle intrinsics by applying the intrinsic to the shadows.
  ///
  /// The trailing arguments are passed verbatim to the intrinsic, though any
  /// uninitialized trailing argument also taints the result. For example, for
  /// out = intrinsic(var1, var2, opType) we compute
  ///   shadow[out] = intrinsic(shadow[var1], shadow[var2], opType)
  ///                 | shadow[opType]
  ///
  /// CAUTION: this assumes the intrinsic copies arbitrary bit patterns
  /// unchanged (e.g., treats NaN-patterned floats like any other value).
  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID shadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    IRBuilder<> IRB(&I);

    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    // Don't use getNumOperands() because it includes the callee.
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);
      // Shadows are integer-ish types, but some intrinsics require a
      // different (e.g., floating-point) type.
      ShadowArgs.push_back(
          IRB.CreateBitCast(Shadow, I.getArgOperand(i)->getType()));
    }

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs;
         i < I.arg_size(); i++) {
      Value *Arg = I.getArgOperand(i);
      ShadowArgs.push_back(Arg);
    }

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // Combine the computed shadow with the shadows of the trailing args.
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs;
         i < I.arg_size(); i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }

    setShadow(&I, IRB.CreateBitCast(CombinedShadow, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }

  // Approximation only.
  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    handleShadowOr(I);
  }
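  // handleIntrinsicByApplyingToShadow is the generic "shadow follows data
  // movement" strategy: it is precise for intrinsics that only move or select
  // bytes (tbl/tbx, pshufb, bitreverse, pmov truncations), because applying
  // the same movement to the shadow carries each value's shadow along with
  // it. It is not valid for arithmetic intrinsics, where one poisoned input
  // bit can influence many output bits; those use an OR of shadows instead,
  // as in handleNEONVectorMultiplyIntrinsic above.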
  // Handle intrinsics that are not platform-specific.
  // Returns true if the intrinsic was handled.
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/0);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    // Signed/unsigned min/max reductions: the shadow of an extremum is that
    // extremum's shadow.
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    // Floating-point min/max reductions.
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, false);
      break;
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      handleShadowOr(I);
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }
  // Handle x86 SIMD intrinsics.
  // Returns true if the intrinsic was handled.
  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;
    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;

    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }

    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
      break;

    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }

    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/true);
      break;

    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /*Variable=*/true);
      break;

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;

    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;

    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/0);
      break;

    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/8);
      break;

    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/16);
      break;

    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssds_512:
    case Intrinsic::x86_avx2_vpdpbsud_128:
    case Intrinsic::x86_avx2_vpdpbsud_256:
    case Intrinsic::x86_avx10_vpdpbsud_512:
    case Intrinsic::x86_avx2_vpdpbsuds_128:
    case Intrinsic::x86_avx2_vpdpbsuds_256:
    case Intrinsic::x86_avx10_vpdpbsuds_512:
    case Intrinsic::x86_avx2_vpdpbuud_128:
    case Intrinsic::x86_avx2_vpdpbuud_256:
    case Intrinsic::x86_avx10_vpdpbuud_512:
    case Intrinsic::x86_avx2_vpdpbuuds_128:
    case Intrinsic::x86_avx2_vpdpbuuds_256:
    case Intrinsic::x86_avx10_vpdpbuuds_512:
      handleVectorPmaddIntrinsic(I, 4,
                                 /*EltSizeInBits=*/8);
      break;

    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
    case Intrinsic::x86_avx2_vpdpwsud_128:
    case Intrinsic::x86_avx2_vpdpwsud_256:
    case Intrinsic::x86_avx10_vpdpwsud_512:
    case Intrinsic::x86_avx2_vpdpwsuds_128:
    case Intrinsic::x86_avx2_vpdpwsuds_256:
    case Intrinsic::x86_avx10_vpdpwsuds_512:
    case Intrinsic::x86_avx2_vpdpwusd_128:
    case Intrinsic::x86_avx2_vpdpwusd_256:
    case Intrinsic::x86_avx10_vpdpwusd_512:
    case Intrinsic::x86_avx2_vpdpwusds_128:
    case Intrinsic::x86_avx2_vpdpwusds_256:
    case Intrinsic::x86_avx10_vpdpwusds_512:
    case Intrinsic::x86_avx2_vpdpwuud_128:
    case Intrinsic::x86_avx2_vpdpwuud_256:
    case Intrinsic::x86_avx10_vpdpwuud_512:
    case Intrinsic::x86_avx2_vpdpwuuds_128:
    case Intrinsic::x86_avx2_vpdpwuuds_256:
    case Intrinsic::x86_avx10_vpdpwuuds_512:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/16);
      break;

    case Intrinsic::x86_avx512bf16_dpbf16ps_128:
    case Intrinsic::x86_avx512bf16_dpbf16ps_256:
    case Intrinsic::x86_avx512bf16_dpbf16ps_512:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/16);
      break;

    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;

    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;

    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;

    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;

    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;

    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
      handlePairwiseShadowOrIntrinsic(I, 1,
                                      /*ReinterpretElemWidth=*/16);
      break;

    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_avx2_phsub_w:
      handlePairwiseShadowOrIntrinsic(I, 2,
                                      /*ReinterpretElemWidth=*/16);
      break;

    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
      handlePairwiseShadowOrIntrinsic(I, 1,
                                      /*ReinterpretElemWidth=*/32);
      break;

    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_avx2_phsub_d:
      handlePairwiseShadowOrIntrinsic(I, 2,
                                      /*ReinterpretElemWidth=*/32);
      break;

    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
      handlePairwiseShadowOrIntrinsic(I, 1,
                                      /*ReinterpretElemWidth=*/16);
      break;

    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_avx2_phsub_sw:
      handlePairwiseShadowOrIntrinsic(I, 2,
                                      /*ReinterpretElemWidth=*/16);
      break;

    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;

    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256:
      handlePairwiseShadowOrIntrinsic(I, 2);
      break;

    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }

    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }

    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // These AVX512 variants carry a trailing rounding-mode operand that the
      // generic binary handling does not expect.
      [[maybe_unused]] bool Success = maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }

    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }

    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;

    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;

    // AVX512 PMOV: packed move with truncation; precisely handled by
    // applying the same intrinsic to the shadow.
    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    // Signed/unsigned saturating variants: approximated using the
    // corresponding truncating pmov on the shadow.
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_dw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_db_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qb_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    // There is no non-saturating pmov counterpart for these, so handle them
    // as generic masked down-conversions.
    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      handleAVX512VectorDownConvert(I);
      break;
    }

    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1,
                                        /*MaskIndex=*/2);
      break;

    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1,
                                        /*MaskIndex=*/2);
      break;

    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
    case Intrinsic::x86_avx512_mask_rndscale_ps_512:
    case Intrinsic::x86_avx512_mask_rndscale_ps_256:
    case Intrinsic::x86_avx512_mask_rndscale_ps_128:
    case Intrinsic::x86_avx512_mask_rndscale_pd_512:
    case Intrinsic::x86_avx512_mask_rndscale_pd_256:
    case Intrinsic::x86_avx512_mask_rndscale_pd_128:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 2,
                                        /*MaskIndex=*/3);
      break;

    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }

    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;

    default:
      return false;
    }
    return true;
  }
  // Handle Arm SIMD (NEON) intrinsics.
  // Returns true if the intrinsic was handled.
  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;

    // Floating-point pairwise max/min
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    // Integer pairwise max/min
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    // Pairwise add
    case Intrinsic::aarch64_neon_addp:
    case Intrinsic::aarch64_neon_faddp:
    // Pairwise long add
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    }

    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtxn: {
      handleNEONVectorConvertIntrinsic(I);
      break;
    }

    // Add reduction to scalar
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    // Signed/unsigned min/max reduction to scalar
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    // Floating-point min/max reduction to scalar
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    // Signed/unsigned long add reduction to scalar
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, true);
      break;

    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, /*WithLane=*/false);
      break;
    }

    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, /*WithLane=*/true);
      break;
    }

    // Saturating extract narrow
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      handleShadowOr(I);
      break;

    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/false);
      break;
    }

    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/true);
      break;
    }

    // Table lookup/extension: shadow is computed by applying the same
    // lookup to the shadows, with the index register passed verbatim.
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }

    case Intrinsic::aarch64_neon_smmla:
    case Intrinsic::aarch64_neon_ummla:
    case Intrinsic::aarch64_neon_usmmla:
      handleNEONMatrixMultiply(I, 2, 8, 8,
                               /*BCols=*/2);
      break;

    default:
      return false;
    }
    return true;
  }
  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }
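  // Dispatch order matters: cross-platform intrinsics first, then the
  // target-specific tables, then the heuristic handler for unknown
  // intrinsics. Only if all of them decline do we fall back to
  // visitInstruction, which conservatively checks every operand and marks
  // the result as fully initialized.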
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have a terminator.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure the
    // shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }

  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure the
    // shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    // Atomic store always paints clean shadow/origin.
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }
  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check the argument shadow and
      // mark all outputs as clean. Side effects not visible in the
      // constraints are not handled.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need special handling because there is no good
      // way to intercept them or compile the library with instrumentation.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the callee becoming a
      // non-readonly function after instrumentation. To prevent that code
      // from being optimized out, drop the relevant attributes up front.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);

      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction()) {
        Func->removeFnAttrs(B);
      }
    }
    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so don't check them.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();

      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        [[maybe_unused]] Value *Store = nullptr;
        // Compute the shadow for A even if it is ByVal, because the calling
        // convention will copy the actual argument.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires special handling as it's too big for a single
          // load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment, OriginSize);
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of
          // uninitialized data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
        ArgOffset += alignTo(Size, kShadowTLSAlignment);
      }
    }

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the return value.
    if (!CB.getType()->isSized())
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
        kShadowTLSAlignment, "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB,
                IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: consider a SpecialCaseList for functions that must always return
    // fully initialized values. For now, "main" is hardcoded.
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
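  // With -msan-eager-checks and a noundef return attribute, the shadow is
  // checked at the return site and the TLS retval slot is left clean; "main"
  // is always checked eagerly because its return value is consumed outside
  // of any instrumented code.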
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }
  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false, GlobalValue::PrivateLinkage,
                              IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    return createPrivateConstGlobalForString(*F.getParent(), I.getName());
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the
    // corresponding llvm.lifetime.start.
    AllocaSet.insert(&I);
  }
  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if the condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra select, which gives much more compact IR:
      //   Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else if (isScalableNonVectorType(I.getType())) {
      // This is intended for target("aarch64.svcount"), which can't take the
      // generic path below because of incompatible types.
      Sa1 = getCleanShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (the condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned. If !Sb, simply pick one of Sc and Sd.

      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);

      // Result shadow if the condition shadow is 1.
      Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector condition must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See https://github.com/google/sanitizers/issues/504
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  void visitResumeInst(ResumeInst &I) {
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    // Nothing to do here.
  }
  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // For each assembly argument, check its value for being initialized. If
    // the argument is a pointer, assume it points to a single element of the
    // corresponding type.
    Type *OpType = Operand->getType();
    if (!OpType->isPointerTy() || !isOutput) {
      assert(!isOutput);
      insertCheckShadowOf(Operand, &I);
      return;
    }
    if (!ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy, derived from elementtype(), does not encode the pointer's
      // alignment, so conservatively assume the shadow memory is unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }
  /// Instrument inline assembly conservatively: check the shadow of asm()
  /// input arguments, then unpoison the outputs and all the memory locations
  /// pointed to by output arguments. In the kernel this defers the actual
  /// unpoisoning to __msan_instrument_asm_store(ptr, size) in the runtime,
  /// which can perform more complex address checks.
  void visitAsmInstruction(Instruction &I) {
    // An inline asm() statement maps its arguments to CallInst operands as
    // follows: nR register outputs ("=r") are returned by value in a single
    // structure (the SSA value of the CallInst); nO other outputs ("=m" and
    // others) come first as pointer operands; nI inputs follow as the
    // remaining operands; the last operand is the asm callee itself.
    const DataLayout &DL = F.getDataLayout();
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments first, so that we don't unpoison outputs (which
    // may alias inputs) before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual asm call,
    // so that the shadow for memory published in the asm() statement remains
    // valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
struct VarArgHelperBase : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    return IRB.CreatePtrAdd(
        MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    // getOriginPtrForVAArgument() is always called after
    // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
    // overflow.
    return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset),
                            "_msarg_va_o");
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // The tail of __msan_va_arg_tls is not large enough to fit the full value
    // shadow, but it will be copied to the backup anyway. Make it clean.
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
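// va_list instrumentation in a nutshell: at each variadic call site the
// argument shadows are written to __msan_va_arg_tls (in a target-specific
// layout, see the helpers below); at va_start/va_copy the __va_list_tag
// itself is unpoisoned; and finalizeInstrumentation() copies a backup of the
// TLS contents over the register-save and overflow areas that the va_list
// points at, so va_arg reads consistent shadow.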
7583struct VarArgAMD64Helper :
public VarArgHelperBase {
  // AMD64 ABI register save area: 6 GP registers (48 bytes), then 8 SSE
  // registers (128 bytes, ending at offset 176).
  static const unsigned AMD64GpEndOffset = 48;
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  // A very rough approximation of the x86-64 argument classification rules:
  // small integers and pointers go to GP registers, FP scalars and vectors
  // go to SSE registers, everything else goes to memory.
  ArgKind classifyArgument(Value *arg) {
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area. Fixed arguments
        // passed through the overflow area will be stepped over by va_start,
        // so ignore them.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }
        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) =
            MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                   kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
        }
        // Take fixed arguments into account for GpOffset and FpOffset, but
        // don't actually store shadows for them.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        // ... (elided: align the copy and memcpy MS.VAArgOriginTLS into it)
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                         VAArgOverflowSize);
      }
    }
  }
};
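// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the shadow-TLS offset
// bookkeeping in VarArgAMD64Helper::visitCallBase, modeled with plain
// integers. DemoArgKind/DemoOffsets/demoAssignOffset are invented names for
// this demo; the constants (48, 176, 8/16-byte strides) mirror the ones
// above.
#include <cstdint>

enum DemoArgKind { DemoGP, DemoFP, DemoMem };

struct DemoOffsets {
  unsigned Gp = 0;         // six 8-byte GP register slots: [0, 48)
  unsigned Fp = 48;        // eight 16-byte SSE register slots: [48, 176)
  unsigned Overflow = 176; // stack overflow area starts after the save area
};

// Returns the offset assigned to an argument's shadow, spilling to the
// overflow area when the register class is exhausted, as the pass does.
inline unsigned demoAssignOffset(DemoOffsets &O, DemoArgKind AK,
                                 uint64_t Size) {
  if (AK == DemoGP && O.Gp < 48) {
    unsigned R = O.Gp;
    O.Gp += 8;
    return R;
  }
  if (AK == DemoFP && O.Fp < 176) {
    unsigned R = O.Fp;
    O.Fp += 16;
    return R;
  }
  unsigned R = O.Overflow;
  O.Overflow += (Size + 7) & ~uint64_t(7); // alignTo(Size, 8)
  return R;
}
// For printf("%d %f %s", i, d, s): i -> 0, s -> 8 (GP slots), d -> 48 (first
// SSE slot); the seventh integer vararg would land at 176, the start of the
// overflow area.
// ---------------------------------------------------------------------------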
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of the AArch64 argument classification rules.
  // The second pair member is the number of registers the argument consumes.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    return {AK_Memory, 0};
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory:
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
      }
      // Count Gr/Vr slots of fixed arguments, but don't bother to actually
      // store a shadow for them.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size; the offsets are negative, so
  // they must be sign extended.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      // The AAPCS64 va_list holds the stack pointer plus the tops of (and
      // negative offsets into) the GP and FP/SIMD register save areas; see
      // the DemoVAList sketch after this struct for the field layout.
      Type *RegSaveAreaPtrTy = MS.PtrTy;
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // Since __gr_off is '0 - ((8 - named_gr) * 8)', skipping the leading
      // bytes of shadow that belong to named arguments propagates exactly
      // the variadic portion.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally for the remaining (stacked) arguments.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
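// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the AAPCS64 va_list layout that
// getVAField64/getVAField32 above index into. Field offsets 0/8/16/24/28
// match the constants used in finalizeInstrumentation. DemoVAList and
// demoGrSaveArea are invented mirrors of the ABI struct, for reference only.
#include <cstdint>

struct DemoVAList {
  void *stack;     // offset 0:  next stacked-argument address
  void *gr_top;    // offset 8:  end of the GP register save area
  void *vr_top;    // offset 16: end of the FP/SIMD register save area
  int32_t gr_offs; // offset 24: negative offset from gr_top (0 when empty)
  int32_t vr_offs; // offset 28: negative offset from vr_top (0 when empty)
};

// The register save area address is recovered exactly as the pass does:
// top-of-area plus the negative, sign-extended offset.
inline const char *demoGrSaveArea(const DemoVAList &VL) {
  return static_cast<const char *>(VL.gr_top) + VL.gr_offs;
}
// With 2 named GP arguments, gr_offs starts at -(8 - 2) * 8 = -48, so the
// first vararg sits 48 bytes before gr_top, matching GrOffSaveArea above.
// ---------------------------------------------------------------------------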
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // For PowerPC, we need to deal with alignment of stack arguments -
    // they are mostly aligned to 8 bytes, but vectors are aligned to
    // 16 bytes and arrays to the alignment of their element type.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    // The parameter save area starts 48 bytes from the frame pointer for
    // ABIv1, and 32 bytes for ABIv2.
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the element size.
          Type *ElementTy = A->getType()->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < 8 to match the
          // placement of bits in big endian system.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       CopySize);
    }
  }
};
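// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the two PPC64 details handled
// above, modeled standalone. demoSaveAreaBase and demoPPC64Slot are invented
// names for this demo.
#include <cstdint>

// ELFv2 reserves 32 bytes before the parameter save area; ELFv1 reserves 48.
inline unsigned demoSaveAreaBase(bool IsELFv2) { return IsELFv2 ? 32 : 48; }

// On big-endian PPC64, a value smaller than its 8-byte slot occupies the
// high-order (trailing-address) bytes, so its shadow is shifted to the end
// of the slot.
inline unsigned demoPPC64Slot(unsigned SlotStart, uint64_t ArgSize,
                              bool BigEndian) {
  if (BigEndian && ArgSize < 8)
    return SlotStart + static_cast<unsigned>(8 - ArgSize);
  return SlotStart;
}
// E.g. a 4-byte int whose slot starts at offset 32 has its bytes (and its
// shadow) at offsets 36..39 on big-endian, but 32..35 on little-endian.
// ---------------------------------------------------------------------------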
/// PowerPC32-specific implementation of VarArgHelper.
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase;
    // ... (elided: VAArgBase selection for the PPC32 SVR4 ABI)
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          // Arrays are aligned to the element size.
          Type *ElementTy = ArgTy->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < IntptrSize to match
          // the placement of bits in big endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      }
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;

      // In the PPC32 SVR4 va_list, the reg_save_area pointer lives at
      // offset 8 and the overflow_arg_area pointer at offset 4.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      // At most 32 bytes of shadow (eight 4-byte GPR slots) go to the
      // register save area; the rest belongs to the overflow area.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);
      // ... (elided: the save-area shadow pointer is advanced past the copied
      //      bytes and the rest of the 32-byte area is handled separately,
      //      using ConstantInt::get(MS.IntptrTy, 32) and Alignment)

      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      // Copy the shadow that lies past the register-save portion of the TLS
      // copy into the overflow area shadow.
      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
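// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the va_start handling above
// splits the shadow copy in two, because the PPC32 SVR4 va_list points at a
// register save area (at most 32 bytes of shadow go there) and a separate
// overflow area. DemoCopyPlan/demoSplitCopy are invented names.
#include <algorithm>
#include <cstdint>

struct DemoCopyPlan {
  uint64_t RegSaveBytes;  // copied into the register save area shadow
  uint64_t OverflowBytes; // copied into the overflow area shadow
};

inline DemoCopyPlan demoSplitCopy(uint64_t TotalShadowBytes) {
  DemoCopyPlan P;
  P.RegSaveBytes = std::min<uint64_t>(TotalShadowBytes, 32); // umin(CopySize, 32)
  P.OverflowBytes = TotalShadowBytes - P.RegSaveBytes;
  return P;
}
// E.g. 44 bytes of vararg shadow -> 32 bytes into the register save area and
// the remaining 12 into the overflow area, mirroring RegSaveAreaSize and
// OverflowAreaSize above.
// ---------------------------------------------------------------------------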
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(
            F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  ArgKind classifyArgument(Type *T) {
    // Some i128 and fp128 arguments are converted to pointers only in the
    // back end.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // ABI says: "One of the simple integer types no more than 64 bits wide.
    // ... If such an argument is shorter than 64 bits, replace it by a full
    // 64-bit integer representing the same number, using sign or zero
    // extension". Shadow for an integer argument has the same type as the
    // argument itself, so it can be sign or zero extended as well.
    bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
    bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
    if (ZExt) {
      assert(!SExt);
      return ShadowExtension::Zero;
    }
    if (SExt) {
      assert(!ZExt);
      return ShadowExtension::Sign;
    }
    return ShadowExtension::None;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            // A short FP datum occupies the leftmost bits of its register,
            // so, unlike the other kinds, no gap or extension is needed.
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex; no need to store shadow, since vector
        // varargs go through ArgKind::Memory.
        assert(IsFixed);
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset and store shadow only for varargs.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions, it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        // ... (elided: align the copy and memcpy MS.VAArgOriginTLS into it)
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
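// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the GapSize logic above. On
// s390x every GP slot is 8 bytes; a narrower integer passed without a
// zeroext/signext attribute keeps its own width, so its shadow is stored at
// the end of the slot (big-endian placement). demoGapSize is an invented
// name.
#include <cstdint>

// Returns how many leading bytes of the 8-byte slot the shadow skips.
// Assumes ArgAllocSize <= 8, as asserted in the pass.
inline uint64_t demoGapSize(uint64_t ArgAllocSize, bool HasZExtOrSExt) {
  // With an extension attribute the shadow itself is widened to 64 bits
  // (see CreateShadowCast above), so there is no gap.
  if (HasZExtOrSExt)
    return 0;
  return 8 - ArgAllocSize;
}
// E.g. an i32 without attributes occupies bytes 4..7 of its slot, so its
// shadow is written at GpOffset + 4, exactly as in ArgKind::GeneralPurpose.
// ---------------------------------------------------------------------------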
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for argument with size < IntptrSize to match
          // the placement of bits in big endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, IntptrSize);
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       CopySize);
    }
  }
};
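// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: on i386 every vararg is pushed
// on the stack, so the helper only needs one running offset, aligned to the
// pointer size. demoNextI386Slot is an invented name; it mirrors the alignTo
// calls above.
#include <cstdint>

inline unsigned demoNextI386Slot(unsigned &VAArgOffset, uint64_t ArgSize,
                                 unsigned IntptrSize /* 4 on i386 */) {
  // Round the running offset up to the slot alignment, hand out the slot,
  // then advance past the argument.
  VAArgOffset = (VAArgOffset + IntptrSize - 1) & ~(IntptrSize - 1);
  unsigned Slot = VAArgOffset;
  VAArgOffset += static_cast<unsigned>(ArgSize);
  return Slot;
}
// E.g. (char promoted to int, double, int) yields slots 0, 4 and 12 with
// IntptrSize == 4.
// ---------------------------------------------------------------------------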
/// Implementation of VarArgHelper that is used for ARM32, MIPS, RISC-V and
/// LoongArch64; the targets differ only in the VAListTagSize they pass in.
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjusting the shadow for argument with size < IntptrSize to match
        // the placement of bits in big endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member, i.e. it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       CopySize);
    }
  }
};
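// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the TLS hand-off protocol every
// helper above follows. The caller-side instrumentation fills a shadow
// buffer and records its size; the callee-side va_start instrumentation
// snapshots it at function entry, before any other call can overwrite the
// TLS. DemoVATLS/demoSnapshot are invented names; the 800-byte size mirrors
// kParamTLSSize in this file.
#include <cstdint>
#include <cstring>
#include <vector>

struct DemoVATLS {
  uint8_t Shadow[800]; // stand-in for the per-thread vararg shadow TLS
  uint64_t Size;       // stand-in for the vararg-overflow-size TLS slot
};

// What finalizeInstrumentation emits at function entry: copy
// umin(Size, sizeof buffer) bytes into a private alloca.
inline std::vector<uint8_t> demoSnapshot(const DemoVATLS &TLS) {
  uint64_t N = TLS.Size < sizeof(TLS.Shadow) ? TLS.Size : sizeof(TLS.Shadow);
  std::vector<uint8_t> Copy(N);
  std::memcpy(Copy.data(), TLS.Shadow, N);
  return Copy;
}
// ---------------------------------------------------------------------------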
// ARM32, LoongArch64, MIPS and RISC-V share the same calling conventions
// regarding VAArgs, so they all reuse VarArgGenericHelper.
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
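// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the pass: the dispatch above keyed by a
// target string instead of llvm::Triple. demoHelperFor and the architecture
// strings are invented for this demo; the real code queries Triple
// predicates such as isAArch64()/isSystemZ().
#include <string>

inline std::string demoHelperFor(const std::string &Arch) {
  if (Arch == "x86_64") return "VarArgAMD64Helper";
  if (Arch == "i386") return "VarArgI386Helper";
  if (Arch == "aarch64") return "VarArgAArch64Helper";
  if (Arch == "s390x") return "VarArgSystemZHelper";
  // Targets with no dedicated helper fall back to the no-op helper, which
  // performs no varargs shadow propagation at all.
  return "VarArgNoOpHelper";
}
// ---------------------------------------------------------------------------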
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // ... (elided: clear memory attributes that instrumentation invalidates)
  return Visitor.runOnFunction();
}