using namespace llvm;

#define DEBUG_TYPE "msan"
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));

static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
       cl::desc("Insert checks for constant shadow values"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithComdat("msan-with-comdat",
       cl::desc("Place MSan constructors in comdat sections"),
       cl::Hidden, cl::init(false));
// Memory map parameters used in application-to-shadow address calculation.
//   Offset = (Addr & ~AndMask) ^ XorMask
//   Shadow = ShadowBase + Offset
//   Origin = OriginBase + Offset
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};

// Per-architecture mask/base constants (numeric values elided in this
// excerpt).
static const MemoryMapParams Linux_I386_MemoryMapParams = { /* ... */ };
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
  /* legacy x86_64 mapping constants */
#else
  /* current x86_64 mapping constants */
#endif
};
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = { /* ... */ };
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = { /* ... */ };
static const MemoryMapParams Linux_AArch64_MemoryMapParams = { /* ... */ };
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = { /* ... */ };
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = { /* ... */ };

static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
  &Linux_I386_MemoryMapParams,
  &Linux_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
  nullptr,
  &Linux_MIPS64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
  nullptr,
  &Linux_PowerPC64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
  nullptr,
  &Linux_AArch64_MemoryMapParams,
};

static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
  &FreeBSD_I386_MemoryMapParams,
  &FreeBSD_X86_64_MemoryMapParams,
};
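// Illustrative sketch (not part of the original pass): how the MapParams
// constants combine into a shadow address. The helper name appToShadow is
// hypothetical; the formula mirrors getShadowPtrOffset()/getShadowPtr()
// further down in this file.
static inline uint64_t appToShadow(uint64_t Addr, const MemoryMapParams &P) {
  uint64_t Offset = (Addr & ~P.AndMask) ^ P.XorMask; // strip/relocate app bits
  return P.ShadowBase + Offset;             // origin lives at OriginBase + Offset
}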
namespace {

/// \brief MemorySanitizer: instrument code to detect uninitialized reads.
class MemorySanitizer : public FunctionPass {
public:
  MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        Recover(Recover || ClKeepGoing),
        WarningFn(nullptr) {}
  StringRef getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;
  bool Recover;

  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helpers for stack poisoning and origin chaining.
  Value *MsanSetAllocaOrigin4Fn;
  Value *MsanPoisonStackFn;
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Memory map parameters used in application-to-shadow calculation.
  const MemoryMapParams *MapParams;

  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  Function *MsanCtorFunction;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
};

} // anonymous namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS_BEGIN(MemorySanitizer, "msan",
                      "MemorySanitizer: detects uninitialized reads.", false,
                      false)
INITIALIZE_PASS_END(MemorySanitizer, "msan",
                    "MemorySanitizer: detects uninitialized reads.", false,
                    false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) {
  return new MemorySanitizer(TrackOrigins, Recover);
}
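// Usage sketch (assumption, not taken from this file): with the legacy pass
// manager a client would typically schedule the pass like
//   legacy::PassManager PM;
//   PM.add(createMemorySanitizerPass(/*TrackOrigins=*/1, /*Recover=*/false));
//   PM.run(M);
// In a normal toolchain build this is done for you by clang -fsanitize=memory.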
/// \brief Insert declarations for the MSan runtime entry points.
void MemorySanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);

  // Create the warning callback.
  StringRef WarningFnName = Recover ? "__msan_warning"
                                    : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), nullptr);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
      "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
      IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanPoisonStackFn = M.getOrInsertFunction(
      "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
      nullptr);
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
  MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MemcpyFn = M.getOrInsertFunction(
      "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, nullptr);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, nullptr);

  // Thread-local globals used to pass shadow between caller and callee.
  // ...
  VAArgOverflowSizeTLS = new GlobalVariable(
      M, IRB.getInt64Ty(), /*isConstant=*/false,
      GlobalVariable::ExternalLinkage, nullptr,
      "__msan_va_arg_overflow_size_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  // ...
}
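// For reference only (an assumption derived from the declarations above, not
// from the runtime sources): the MSan runtime is expected to export C entry
// points shaped roughly like
//   void     __msan_warning(void);
//   void     __msan_warning_noreturn(void);
//   void     __msan_maybe_warning_4(uint32_t shadow, uint32_t origin);
//   void     __msan_maybe_store_origin_4(uint32_t shadow, void *addr,
//                                        uint32_t origin);
//   uint32_t __msan_chain_origin(uint32_t origin);
//   void    *__msan_memcpy(void *dst, const void *src, uintptr_t n);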
/// \brief Module-level initialization: pick the address mapping for the
/// target, create the module constructor and the origin/keep-going globals.
bool MemorySanitizer::doInitialization(Module &M) {
  auto &DL = M.getDataLayout();

  Triple TargetTriple(M.getTargetTriple());
  switch (TargetTriple.getOS()) {
  case Triple::FreeBSD:
    switch (TargetTriple.getArch()) {
    case Triple::x86_64: MapParams = FreeBSD_X86_MemoryMapParams.bits64; break;
    case Triple::x86:    MapParams = FreeBSD_X86_MemoryMapParams.bits32; break;
    default: report_fatal_error("unsupported architecture");
    }
    break;
  case Triple::Linux:
    switch (TargetTriple.getArch()) {
    case Triple::x86_64: MapParams = Linux_X86_MemoryMapParams.bits64; break;
    case Triple::x86:    MapParams = Linux_X86_MemoryMapParams.bits32; break;
    case Triple::mips64:
    case Triple::mips64el:
      MapParams = Linux_MIPS_MemoryMapParams.bits64;
      break;
    case Triple::ppc64:
    case Triple::ppc64le:
      MapParams = Linux_PowerPC_MemoryMapParams.bits64;
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      MapParams = Linux_ARM_MemoryMapParams.bits64;
      break;
    default: report_fatal_error("unsupported architecture");
    }
    break;
  default:
    report_fatal_error("unsupported operating system");
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  std::tie(MsanCtorFunction, std::ignore) =
      createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
                                          /*InitArgTypes=*/{},
                                          /*InitArgs=*/{});
  if (ClWithComdat) {
    Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
    MsanCtorFunction->setComdat(MsanCtorComdat);
  }
  // ... append MsanCtorFunction to llvm.global_ctors ...

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");
  if (Recover)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(Recover), "__msan_keep_going");

  return true;
}
/// \brief A helper class that handles instrumentation of VarArg functions on
/// a particular platform.
struct VarArgHelper {
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
  virtual void visitVAStartInst(VAStartInst &I) = 0;
  virtual void visitVACopyInst(VACopyInst &I) = 0;
  virtual void finalizeInstrumentation() = 0;
  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil((TypeSize + 7) / 8);
}
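// Quick sanity check of the mapping above (illustrative, assuming the usual
// four access sizes of 1/2/4/8 bytes):
//   TypeSizeToSizeIndex(8)  == 0   // i8  -> __msan_maybe_warning_1
//   TypeSizeToSizeIndex(16) == 1   // i16 -> __msan_maybe_warning_2
//   TypeSizeToSizeIndex(32) == 2   // i32 -> __msan_maybe_warning_4
//   TypeSizeToSizeIndex(64) == 3   // i64 -> __msan_maybe_warning_8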
/// \brief The visitor that does the actual per-instruction instrumentation.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes;
  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // function attributes and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<StoreInst *, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
              dbgs() << "MemorySanitizer is not inserting checks into '"
                     << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }
  /// \brief Fill Size bytes of origin memory with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   unsigned Size, unsigned Alignment) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
          IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        // ... store IntptrOrigin to the i-th intptr-sized slot ...
        CurrentAlignment = IntptrAlignment;
      }
    }
    // ... store the remaining origin words at kOriginSize granularity ...
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
    if (Shadow->getType()->isAggregateType()) {
      paintOrigin(IRB, updateOrigin(Origin, IRB),
                  getOriginPtr(Addr, IRB, Alignment), StoreSize,
                  OriginAlignment);
      return;
    }
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
    if (ConstantShadow) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
        paintOrigin(IRB, updateOrigin(Origin, IRB),
                    getOriginPtr(Addr, IRB, Alignment), StoreSize,
                    OriginAlignment);
      return;
    }

    unsigned TypeSizeInBits =
        DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 = IRB.CreateZExt(
          ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      // ... call Fn(ConvertedShadow2, Addr, Origin) ...
    } else {
      Value *Cmp = IRB.CreateICmpNE(
          ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
      // ... branch on Cmp, then in the "shadow is dirty" block:
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
                  getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
                  OriginAlignment);
    }
  }
  void materializeStores(bool InstrumentWithCalls) {
    for (StoreInst *SI : StoreList) {
      IRBuilder<> IRB(SI);
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI->getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");

      if (ClCheckAccessAddress)
        insertShadowCheck(Addr, SI);

      if (SI->isAtomic())
        SI->setOrdering(addReleaseOrdering(SI->getOrdering()));

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI->getAlignment(),
                    InstrumentWithCalls);
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");

    Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
    if (ConstantShadow) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
        if (MS.TrackOrigins) {
          // ... store Origin to MS.OriginTLS ...
        }
        // ... unconditionally call the warning function ...
      }
      return;
    }

    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned TypeSizeInBits =
        DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0)});
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !MS.Recover, MS.ColdCallWeights);

      IRBuilder<> IRB2(CheckTerm);
      if (MS.TrackOrigins) {
        // ... store Origin to MS.OriginTLS ...
      }
      // ... call WarningFn ...
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }
  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
  }

  /// \brief Add MemorySanitizer instrumentation to the function.
  bool runOnFunction() {
    // ... visit all instructions, collecting shadow PHI nodes, stores and
    //     pending checks ...

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    return true;
  }
  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized())
      return nullptr;
    // For integer type, shadow is the same as the original type.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type *, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
  /// \brief Compute the integer shadow offset that corresponds to a given
  /// application address: Offset = (Addr & ~AndMask) ^ XorMask
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);

    uint64_t AndMask = MS.MapParams->AndMask;
    if (AndMask)
      OffsetLong =
          IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));

    uint64_t XorMask = MS.MapParams->XorMask;
    if (XorMask)
      OffsetLong =
          IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
    return OffsetLong;
  }

  /// \brief Compute the shadow address: Shadow = ShadowBase + Offset
  Value *getShadowPtr(Value *Addr, Type *ShadowTy, IRBuilder<> &IRB) {
    Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
    uint64_t ShadowBase = MS.MapParams->ShadowBase;
    if (ShadowBase != 0)
      ShadowLong = IRB.CreateAdd(ShadowLong,
                                 ConstantInt::get(MS.IntptrTy, ShadowBase));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address, aligned down to kMinOriginAlignment.
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
    Value *OriginLong = getShadowPtrOffset(Addr, IRB);
    uint64_t OriginBase = MS.MapParams->OriginBase;
    if (OriginBase != 0)
      OriginLong = IRB.CreateAdd(OriginLong,
                                 ConstantInt::get(MS.IntptrTy, OriginBase));
    if (Alignment < kMinOriginAlignment) {
      uint64_t Mask = kMinOriginAlignment - 1;
      OriginLong = IRB.CreateAnd(OriginLong,
                                 ConstantInt::get(MS.IntptrTy, ~Mask));
    }
    return IRB.CreateIntToPtr(OriginLong,
                              PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Shadow of a function argument: ParamTLS + ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Origin of a function argument: ParamOriginTLS + ArgOffset.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Shadow and origin slots for the return value.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    llvm_unreachable("Unexpected shadow type");
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
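  // Illustrative note (not in the original source): for a value of type
  // <4 x i32>, getCleanShadow() produces "<4 x i32> zeroinitializer" (fully
  // initialized) and getPoisonedShadow() the all-ones splat
  // "<i32 -1, i32 -1, i32 -1, i32 -1>"; a set bit in a shadow means the
  // corresponding application bit is uninitialized.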
  /// \brief Get the shadow value for a given Value.
  ///
  /// Either returns the value set earlier with setShadow, or extracts it from
  /// ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (!PropagateShadow) return getCleanShadow(V);
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getParent()->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size =
            FArg.hasByValAttr()
                ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
                : DL.getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = DL.getABITypeAlignment(EltType);
            }
            if (Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
                  Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
            } else {
              Value *Cpy = EntryIRB.CreateMemCpy(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                  std::min(ArgAlign, kShadowTLSAlignment));
              DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            }
            *ShadowPtr = getCleanShadow(V);
          } else if (Overflow) {
            // ParamTLS overflow.
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG:    " << FArg << " ==> "
                       << **ShadowPtr << "\n");
          if (MS.TrackOrigins && !Overflow) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          } else {
            setOrigin(A, getCleanOrigin());
          }
        }
        ArgOffset += alignTo(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (!PropagateShadow) return getCleanOrigin();
    if (isa<Constant>(V)) return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will later be instrumented with a check that prints a UMR
  /// warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    if (!InsertChecks) return;
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a value check should be inserted.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow) return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow) return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertShadowCheck(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
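  // Illustrative note (assumption, based on the two helpers above): an atomic
  // store the program marked 'monotonic' is strengthened toward 'release' so
  // the shadow store cannot be observed after the application store, e.g.
  //   store atomic i32 %v, i32* %p monotonic  -->  ... release
  // and, symmetrically, an atomic load is strengthened toward 'acquire'.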
  /// \brief Instrument LoadInst: load the corresponding shadow and,
  /// optionally, the origin; optionally check that the address is defined.
  void visitLoadInst(LoadInst &I) {
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (PropagateShadow && !I.getMetadata("nosanitize")) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        unsigned Alignment = I.getAlignment();
        setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
                                            std::max(kMinOriginAlignment,
                                                     Alignment)));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Stores are instrumented in a batch, see materializeStores().
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) { handleCASOrRMW(I); }
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { handleCASOrRMW(I); }

  // Vector manipulation: propagate the operand shadows element-wise.
  void visitExtractElementInst(ExtractElementInst &I) {
    // ... shadow = extractelement of the operand shadow ...
    setOrigin(&I, getOrigin(&I, 0));
  }
  void visitInsertElementInst(InsertElementInst &I) {
    // ... shadow = insertelement of the operand shadows ...
    setOriginForNaryOp(I);
  }
  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    // ... shadow = shufflevector of the operand shadows ...
    setOriginForNaryOp(I);
  }

  // Casts: propagate the operand shadow through the same cast.
  void visitSExtInst(SExtInst &I)   { /* ... */ setOrigin(&I, getOrigin(&I, 0)); }
  void visitZExtInst(ZExtInst &I)   { /* ... */ setOrigin(&I, getOrigin(&I, 0)); }
  void visitTruncInst(TruncInst &I) { /* ... */ setOrigin(&I, getOrigin(&I, 0)); }
  void visitBitCastInst(BitCastInst &I) {
    // Special case: if this is the bitcast (there can be only one) between a
    // musttail call and a ret, don't instrument. New instructions are not
    // allowed after a musttail call.
    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
  // Bitwise and/or: a constant-0 (resp. constant-1) operand makes the result
  // bit initialized even if the other operand is poisoned, so the shadow is
  // narrower than a plain OR of the operand shadows.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    // ...
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    // ...
    setOriginForNaryOp(I);
  }

  /// \brief Propagate shadow (and origin) for an arbitrary number of operands
  /// by OR-ing the shadows together.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        if (!Shadow) {
          Shadow = OpShadow;
        } else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
            Value *Cond =
                IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the combined shadow/origin as the shadow/origin of I.
    void Done(Instruction *I) {
      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins)
        MSV->setOrigin(I, Origin);
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
  /// \brief Propagate origin for an arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ? Ty->getVectorNumElements() *
                                  Ty->getScalarSizeInBits()
                            : Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as needed.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)
      return IRB.CreateICmpNE(V, getCleanShadow(V));
    // ... bitcast when the sizes match, integer cast otherwise ...
    return IRB.CreateIntCast(V, dstTy, Signed);
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    return IRB.CreateBitCast(V, ShadowTy);
  }

  /// \brief Propagate shadow for an arbitrary operation: OR all the operand
  /// shadows.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  // \brief Handle multiplication by constant.
  //
  // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes the corresponding number of
  // lower bits of the result zero as well. We model it by shifting the other
  // operand shadow left by the required number of bits.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (Ty->isVectorTy()) {
      unsigned NumElements = Ty->getVectorNumElements();
      Type *EltTy = Ty->getSequentialElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
                dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
          Elements.push_back(ConstantInt::get(EltTy, V2));
        } else {
          Elements.push_back(ConstantInt::get(EltTy, 1));
        }
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
        ShadowMul = ConstantInt::get(Ty, V2);
      } else {
        ShadowMul = ConstantInt::get(Ty, 1);
      }
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }

  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }

  // Division: the result shadow comes from the dividend only; an
  // uninitialized divisor is reported via an explicit check instead.
  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }
  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers; for ints this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // The result is defined if there is a defined 1 bit in C, or C is fully
    // defined: Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
        IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                      IRB.CreateICmpEQ(
                          IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument relational comparisons exactly: compare the lowest and
  /// highest values each operand could take given its shadow; the result is
  /// defined iff both comparisons agree.
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument signed relational comparisons against 0 or -1 by
  /// checking only the sign bit.
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp;
    Value *op = nullptr;
    CmpInst::Predicate pre;
    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
      op = I.getOperand(0);
      pre = I.getPredicate();
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
    } else {
      handleShadowOr(I);
      return;
    }

    if ((constOp->isNullValue() &&
         (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
        (constOp->isAllOnesValue() &&
         (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
      IRBuilder<> IRB(&I);
      Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
                                        "_msprop_icmp_s");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp)
      return handleShadowOr(I);
    if (I.isEquality())
      return handleEqualityComparison(I);
    if (ClHandleICmpExact)
      return handleRelationalComparisonExact(I);
    if (I.isSigned())
      return handleSignedRelationalComparison(I);
    if (isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))
      return handleRelationalComparisonExact(I);
    return handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
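  // Worked example for the ICmp handling above (illustrative): comparing
  // A = 0b10?? ('?' marks poisoned bits, so Sa = 0b0011) against the constant
  // B = 0b1000. Some choice of the '?' bits makes A == B true and another
  // makes it false, so the comparison result is reported as poisoned; had A
  // been 0b00??, the comparison would be false for every choice of the '?'
  // bits and the result bit would be considered initialized.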
  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  /// \brief Handle vector store-like intrinsics: store the shadow of the
  /// value operand to the shadow of the target address.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
    return true;
  }

  /// \brief Handle vector load-like intrinsics: load the shadow of the
  /// accessed memory.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *ShadowTy = getShadowTy(&I);
    if (PropagateShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }
  /// \brief Handle (SIMD arithmetic)-like intrinsics with no memory effects:
  /// OR together the shadows of all operands.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    for (unsigned i = 0; i < NumArgOperands; ++i) {
      if (I.getArgOperand(i)->getType() != I.getType())
        return false;
    }
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);
    return true;
  }

  /// \brief Heuristically instrument unknown intrinsics: treat them as a
  /// store, a load or a simple no-memory operation based on operand shapes.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), llvm::Intrinsic::bswap, makeArrayRef(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }
  /// \brief Instrument vector convert intrinsics (cvtsi2ss and friends):
  /// the first NumUsedElements elements of ConvertOp are converted; the rest
  /// of the output is copied from CopyOp or, if absent, is clean.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;
    switch (I.getNumArgOperands()) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // Check the shadow of the converted elements as one combined value.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    if (CopyOp) {
      // The converted elements of the result shadow are clean; the rest is
      // copied from CopyOp's shadow.
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }

  // Given a scalar or vector, extract lower 64 bits (or less), and return all
  // zeroes if it is zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  // Given a vector, extract its first element, and return all zeroes if it
  // is zero, and all ones otherwise.
  Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
    Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
    Type *T = S->getType();
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return IRB.CreateSExt(S2, T);
  }

  /// \brief Instrument vector shift intrinsics (int_x86_avx512_psll_w etc.).
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall(I.getCalledValue(),
                                  {IRB.CreateBitCast(S1, V1->getType()), V2});
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  /// \brief Get an X86_MMX-sized vector type.
  Type *getMMXVectorTy(unsigned EltSizeInBits) {
    const unsigned X86_MMXSizeInBits = 64;
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
    return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                           X86_MMXSizeInBits / EltSizeInBits);
  }
  /// \brief Return the signed counterpart of a saturate-and-pack intrinsic.
  Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
    switch (id) {
    case llvm::Intrinsic::x86_sse2_packsswb_128:
    case llvm::Intrinsic::x86_sse2_packuswb_128:
      return llvm::Intrinsic::x86_sse2_packsswb_128;
    case llvm::Intrinsic::x86_sse2_packssdw_128:
    case llvm::Intrinsic::x86_sse41_packusdw:
      return llvm::Intrinsic::x86_sse2_packssdw_128;
    case llvm::Intrinsic::x86_avx2_packsswb:
    case llvm::Intrinsic::x86_avx2_packuswb:
      return llvm::Intrinsic::x86_avx2_packsswb;
    case llvm::Intrinsic::x86_avx2_packssdw:
    case llvm::Intrinsic::x86_avx2_packusdw:
      return llvm::Intrinsic::x86_avx2_packssdw;
    case llvm::Intrinsic::x86_mmx_packsswb:
    case llvm::Intrinsic::x86_mmx_packuswb:
      return llvm::Intrinsic::x86_mmx_packsswb;
    case llvm::Intrinsic::x86_mmx_packssdw:
      return llvm::Intrinsic::x86_mmx_packssdw;
    default:
      llvm_unreachable("unexpected intrinsic id");
    }
  }

  /// \brief Instrument vector pack intrinsics (x86_mmx_packsswb etc.): shadow
  /// is propagated with the signed variant of the same intrinsic applied to
  /// sext(Sa != 0), sext(Sb != 0). EltSizeInBits is used only for x86mmx.
  void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
    assert(I.getNumArgOperands() == 2);
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    assert(isX86_MMX || S1->getType()->isVectorTy());

    Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
    if (isX86_MMX) {
      S1 = IRB.CreateBitCast(S1, T);
      S2 = IRB.CreateBitCast(S2, T);
    }
    Value *S1_ext = IRB.CreateSExt(
        IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
    Value *S2_ext = IRB.CreateSExt(
        IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
    if (isX86_MMX) {
      Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
      S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
      S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
    }

    Function *ShadowFn = Intrinsic::getDeclaration(
        F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
    Value *S =
        IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
    if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument sum-of-absolute-differences intrinsics.
  void handleVectorSadIntrinsic(IntrinsicInst &I) {
    const unsigned SignificantBitsPerResultElement = 16;
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
    unsigned ZeroBitsPerResultElement =
        ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;

    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument multiply-add intrinsics.
  void handleVectorPmaddIntrinsic(IntrinsicInst &I,
                                  unsigned EltSizeInBits = 0) {
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument compare-packed intrinsics: an OR followed by
  /// sext(icmp ne 0) gives an all-zeros or all-ones shadow per element.
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ResTy = getShadowTy(&I);
    Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    Value *S = IRB.CreateSExt(
        IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument compare-scalar intrinsics: only the first element (or
  /// the i32 result of comi*) depends on the inputs.
  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
    case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
    case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
    case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
    case llvm::Intrinsic::x86_avx512_cvttss2usi64:
    case llvm::Intrinsic::x86_avx512_cvttss2usi:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtusi2sd:
    case llvm::Intrinsic::x86_avx512_cvtusi2ss:
    case llvm::Intrinsic::x86_avx512_cvtusi642sd:
    case llvm::Intrinsic::x86_avx512_cvtusi642ss:
    case llvm::Intrinsic::x86_sse2_cvtsd2si64:
    case llvm::Intrinsic::x86_sse2_cvtsd2si:
    case llvm::Intrinsic::x86_sse2_cvtsd2ss:
    case llvm::Intrinsic::x86_sse2_cvtsi2sd:
    case llvm::Intrinsic::x86_sse2_cvtsi642sd:
    case llvm::Intrinsic::x86_sse2_cvtss2sd:
    case llvm::Intrinsic::x86_sse2_cvttsd2si64:
    case llvm::Intrinsic::x86_sse2_cvttsd2si:
    case llvm::Intrinsic::x86_sse_cvtsi2ss:
    case llvm::Intrinsic::x86_sse_cvtsi642ss:
    case llvm::Intrinsic::x86_sse_cvtss2si64:
    case llvm::Intrinsic::x86_sse_cvtss2si:
    case llvm::Intrinsic::x86_sse_cvttss2si64:
    case llvm::Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case llvm::Intrinsic::x86_sse_cvtps2pi:
    case llvm::Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;

    case llvm::Intrinsic::x86_avx512_psll_w_512:
    case llvm::Intrinsic::x86_avx512_psll_d_512:
    case llvm::Intrinsic::x86_avx512_psll_q_512:
    case llvm::Intrinsic::x86_avx512_pslli_w_512:
    case llvm::Intrinsic::x86_avx512_pslli_d_512:
    case llvm::Intrinsic::x86_avx512_pslli_q_512:
    case llvm::Intrinsic::x86_avx512_psrl_w_512:
    case llvm::Intrinsic::x86_avx512_psrl_d_512:
    case llvm::Intrinsic::x86_avx512_psrl_q_512:
    case llvm::Intrinsic::x86_avx512_psra_w_512:
    case llvm::Intrinsic::x86_avx512_psra_d_512:
    case llvm::Intrinsic::x86_avx512_psra_q_512:
    case llvm::Intrinsic::x86_avx512_psrli_w_512:
    case llvm::Intrinsic::x86_avx512_psrli_d_512:
    case llvm::Intrinsic::x86_avx512_psrli_q_512:
    case llvm::Intrinsic::x86_avx512_psrai_w_512:
    case llvm::Intrinsic::x86_avx512_psrai_d_512:
    case llvm::Intrinsic::x86_avx512_psrai_q_512:
    case llvm::Intrinsic::x86_avx512_psra_q_256:
    case llvm::Intrinsic::x86_avx512_psra_q_128:
    case llvm::Intrinsic::x86_avx512_psrai_q_256:
    case llvm::Intrinsic::x86_avx512_psrai_q_128:
    case llvm::Intrinsic::x86_avx2_psll_w:
    case llvm::Intrinsic::x86_avx2_psll_d:
    case llvm::Intrinsic::x86_avx2_psll_q:
    case llvm::Intrinsic::x86_avx2_pslli_w:
    case llvm::Intrinsic::x86_avx2_pslli_d:
    case llvm::Intrinsic::x86_avx2_pslli_q:
    case llvm::Intrinsic::x86_avx2_psrl_w:
    case llvm::Intrinsic::x86_avx2_psrl_d:
    case llvm::Intrinsic::x86_avx2_psrl_q:
    case llvm::Intrinsic::x86_avx2_psra_w:
    case llvm::Intrinsic::x86_avx2_psra_d:
    case llvm::Intrinsic::x86_avx2_psrli_w:
    case llvm::Intrinsic::x86_avx2_psrli_d:
    case llvm::Intrinsic::x86_avx2_psrli_q:
    case llvm::Intrinsic::x86_avx2_psrai_w:
    case llvm::Intrinsic::x86_avx2_psrai_d:
    case llvm::Intrinsic::x86_sse2_psll_w:
    case llvm::Intrinsic::x86_sse2_psll_d:
    case llvm::Intrinsic::x86_sse2_psll_q:
    case llvm::Intrinsic::x86_sse2_pslli_w:
    case llvm::Intrinsic::x86_sse2_pslli_d:
    case llvm::Intrinsic::x86_sse2_pslli_q:
    case llvm::Intrinsic::x86_sse2_psrl_w:
    case llvm::Intrinsic::x86_sse2_psrl_d:
    case llvm::Intrinsic::x86_sse2_psrl_q:
    case llvm::Intrinsic::x86_sse2_psra_w:
    case llvm::Intrinsic::x86_sse2_psra_d:
    case llvm::Intrinsic::x86_sse2_psrli_w:
    case llvm::Intrinsic::x86_sse2_psrli_d:
    case llvm::Intrinsic::x86_sse2_psrli_q:
    case llvm::Intrinsic::x86_sse2_psrai_w:
    case llvm::Intrinsic::x86_sse2_psrai_d:
    case llvm::Intrinsic::x86_mmx_psll_w:
    case llvm::Intrinsic::x86_mmx_psll_d:
    case llvm::Intrinsic::x86_mmx_psll_q:
    case llvm::Intrinsic::x86_mmx_pslli_w:
    case llvm::Intrinsic::x86_mmx_pslli_d:
    case llvm::Intrinsic::x86_mmx_pslli_q:
    case llvm::Intrinsic::x86_mmx_psrl_w:
    case llvm::Intrinsic::x86_mmx_psrl_d:
    case llvm::Intrinsic::x86_mmx_psrl_q:
    case llvm::Intrinsic::x86_mmx_psra_w:
    case llvm::Intrinsic::x86_mmx_psra_d:
    case llvm::Intrinsic::x86_mmx_psrli_w:
    case llvm::Intrinsic::x86_mmx_psrli_d:
    case llvm::Intrinsic::x86_mmx_psrli_q:
    case llvm::Intrinsic::x86_mmx_psrai_w:
    case llvm::Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;
    case llvm::Intrinsic::x86_avx2_psllv_d:
    case llvm::Intrinsic::x86_avx2_psllv_d_256:
    case llvm::Intrinsic::x86_avx512_psllv_d_512:
    case llvm::Intrinsic::x86_avx2_psllv_q:
    case llvm::Intrinsic::x86_avx2_psllv_q_256:
    case llvm::Intrinsic::x86_avx512_psllv_q_512:
    case llvm::Intrinsic::x86_avx2_psrlv_d:
    case llvm::Intrinsic::x86_avx2_psrlv_d_256:
    case llvm::Intrinsic::x86_avx512_psrlv_d_512:
    case llvm::Intrinsic::x86_avx2_psrlv_q:
    case llvm::Intrinsic::x86_avx2_psrlv_q_256:
    case llvm::Intrinsic::x86_avx512_psrlv_q_512:
    case llvm::Intrinsic::x86_avx2_psrav_d:
    case llvm::Intrinsic::x86_avx2_psrav_d_256:
    case llvm::Intrinsic::x86_avx512_psrav_d_512:
    case llvm::Intrinsic::x86_avx512_psrav_q_128:
    case llvm::Intrinsic::x86_avx512_psrav_q_256:
    case llvm::Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /* Variable */ true);
      break;

    case llvm::Intrinsic::x86_sse2_packsswb_128:
    case llvm::Intrinsic::x86_sse2_packssdw_128:
    case llvm::Intrinsic::x86_sse2_packuswb_128:
    case llvm::Intrinsic::x86_sse41_packusdw:
    case llvm::Intrinsic::x86_avx2_packsswb:
    case llvm::Intrinsic::x86_avx2_packssdw:
    case llvm::Intrinsic::x86_avx2_packuswb:
    case llvm::Intrinsic::x86_avx2_packusdw:
      handleVectorPackIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_mmx_packsswb:
    case llvm::Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case llvm::Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;

    case llvm::Intrinsic::x86_mmx_psad_bw:
    case llvm::Intrinsic::x86_sse2_psad_bw:
    case llvm::Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_sse2_pmadd_wd:
    case llvm::Intrinsic::x86_avx2_pmadd_wd:
    case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 8);
      break;

    case llvm::Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 16);
      break;

    case llvm::Intrinsic::x86_sse_cmp_ss:
    case llvm::Intrinsic::x86_sse2_cmp_sd:
    case llvm::Intrinsic::x86_sse_comieq_ss:
    case llvm::Intrinsic::x86_sse_comilt_ss:
    case llvm::Intrinsic::x86_sse_comile_ss:
    case llvm::Intrinsic::x86_sse_comigt_ss:
    case llvm::Intrinsic::x86_sse_comige_ss:
    case llvm::Intrinsic::x86_sse_comineq_ss:
    case llvm::Intrinsic::x86_sse_ucomieq_ss:
    case llvm::Intrinsic::x86_sse_ucomilt_ss:
    case llvm::Intrinsic::x86_sse_ucomile_ss:
    case llvm::Intrinsic::x86_sse_ucomigt_ss:
    case llvm::Intrinsic::x86_sse_ucomige_ss:
    case llvm::Intrinsic::x86_sse_ucomineq_ss:
    case llvm::Intrinsic::x86_sse2_comieq_sd:
    case llvm::Intrinsic::x86_sse2_comilt_sd:
    case llvm::Intrinsic::x86_sse2_comile_sd:
    case llvm::Intrinsic::x86_sse2_comigt_sd:
    case llvm::Intrinsic::x86_sse2_comige_sd:
    case llvm::Intrinsic::x86_sse2_comineq_sd:
    case llvm::Intrinsic::x86_sse2_ucomieq_sd:
    case llvm::Intrinsic::x86_sse2_ucomilt_sd:
    case llvm::Intrinsic::x86_sse2_ucomile_sd:
    case llvm::Intrinsic::x86_sse2_ucomigt_sd:
    case llvm::Intrinsic::x86_sse2_ucomige_sd:
    case llvm::Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_sse_cmp_ps:
    case llvm::Intrinsic::x86_sse2_cmp_pd:
      handleVectorComparePackedIntrinsic(I);
      break;

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }
  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // The callee will become non-readonly after it is instrumented; clear
      // out readonly/readnone attributes in advance so this code is not
      // optimized away.
      if (Function *Func = Call->getCalledFunction()) {
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = nullptr;
      // Compute the shadow for arg even if it is ByVal, because in that case
      // we copy the whole shadow of the pointee.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                   << " Shadow: " << *ArgShadow << "\n");
      bool ArgIsInitialized = false;
      const DataLayout &DL = F.getParent()->getDataLayout();
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
        if (ArgOffset + Size > kParamTLSSize) break;
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, kShadowTLSAlignment);
      } else {
        Size = DL.getTypeAllocSize(A->getType());
        if (ArgOffset + Size > kParamTLSSize) break;
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
        Constant *Cst = dyn_cast<Constant>(ArgShadow);
        if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
      }
      if (MS.TrackOrigins && !ArgIsInitialized)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      assert(Size != 0 && Store != nullptr);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += alignTo(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT = cast<FunctionType>(
        CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg())
      VAHelper->visitCallSite(CS, IRB);

    // Now get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    // Don't emit the epilogue for musttail call returns.
    if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (CS.isCall()) {
      NextInsn = ++I.getIterator();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow =
        IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                   kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }
  bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal)) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertShadowCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }
    // Create empty shadow (and origin) PHIs; incoming values are filled in
    // later, in runOnFunction(), when all shadows are known.
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    // The alloca'd pointer itself is always fully defined.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    IRBuilder<> IRB(I.getNextNode());
    const DataLayout &DL = F.getParent()->getDataLayout();
    uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn,
                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size)});
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      // Describe the allocation (variable and function name) in a string
      // global and record it as the origin:
      //   __msan_set_alloca_origin4(AllocaPtr,
      //                             ConstantInt::get(MS.IntptrTy, Size),
      //                             StackDescription.str(), CallerPC)
      // ...
    }
  }

  void visitSelectInst(SelectInst &I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();
    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    // Result shadow if the condition shadow is clean.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    // Result shadow if the condition itself is poisoned.
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Bits that are equal and unpoisoned in both c and d stay unpoisoned.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
    }
    setShadow(&I, IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"));
    if (MS.TrackOrigins) {
      // Origins are always i32, so vector conditions are flattened by
      // comparing their bitcast against ConstantInt::getNullValue(FlatTy).
      // ...
      setOrigin(&I, IRB.CreateSelect(
                        Sb, getOrigin(I.getCondition()),
                        IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
                                         getOrigin(I.getFalseValue()))));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing: landing pad values are considered fully initialized.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue:  " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res:        " << *Res << "\n");
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating, check every operand and produce a
    // clean result.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertShadowCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
        VAArgOverflowSize(nullptr) {}

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // Store the shadow of every variadic argument into VAArgTLS, mirroring the
  // GP/FP/overflow areas used by the x86_64 calling convention.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
        OverflowOffset += alignTo(ArgSize, 8);
        IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
                         ArgSize, kShadowTLSAlignment);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *Base;
        switch (AK) {
        case AK_GeneralPurpose:
          Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
          OverflowOffset += alignTo(ArgSize, 8);
        }
        IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
          MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
          MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};
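// Illustrative note (matching the constants used above): the x86_64 SysV
// va_list is a 24-byte struct
//   { i32 gp_offset; i32 fp_offset; i8* overflow_arg_area; i8* reg_save_area }
// which is why visitVAStartInst unpoisons 24 bytes, and why finalizeInstrumentation
// reads the overflow-area pointer at offset 8 and the register save area
// pointer at offset 16 of the va_list tag.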
/// \brief MIPS64-specific implementation of VarArgHelper.
struct VarArgMIPS64Helper : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgSize;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
                     MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
        VAArgSize(nullptr) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
             CS.getFunctionType()->getNumParams(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      Value *Base;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      // ... adjust the offset for sub-8-byte arguments on big-endian
      //     targets ...
      Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, 8);
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member: it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, /* alignment */ 8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
    Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                    VAArgSize);

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
          MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
    }
  }
};
/// \brief AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
                                             + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
        VAArgOverflowSize(nullptr) {}

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value *arg) {
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) ||
        T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // The instrumentation stores the argument shadow in a non ABI-specific
  // format because it does not know which argument is named.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
        GrOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
        VrOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
        OverflowOffset += alignTo(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 32, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 32, /* alignment */ 8, false);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        Type::getInt64PtrTy(*MS.C));
    return IRB.CreateLoad(SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        Type::getInt32PtrTy(*MS.C));
    Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      // The variadic ABI for AArch64 creates two areas to save the incoming
      // argument registers (one for 64-bit general registers xn and one for
      // 128-bit FP/SIMD vn); copy the shadow of those areas and then of the
      // stack (overflow) area.
      Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);

      // Copy the shadow of the unnamed GR arguments.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowPtr(GrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                              GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8);

      // Copy the shadow of the unnamed VR arguments.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowPtr(VrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      Value *VrSrcPtr = IRB.CreateInBoundsGEP(
          IRB.getInt8Ty(),
          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8);

      // And finally for the remaining arguments passed on the stack.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowPtr(StackSaveAreaPtr, IRB.getInt8Ty(), IRB);
      Value *StackSrcPtr = IRB.CreateInBoundsGEP(
          IRB.getInt8Ty(), VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr,
                       VAArgOverflowSize, 16);
    }
  }
};
/// \brief PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgSize;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
        VAArgSize(nullptr) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    // For PowerPC, we need to deal with alignment of stack arguments -
    // they are mostly aligned to 8 bytes, but vectors and i128 arrays are
    // aligned to 16 bytes, and byvals can be aligned to 8 or 16 bytes.
    // The parameter save area starts 48 bytes from the frame pointer for
    // ABIv1 and 32 bytes for ABIv2.
    Triple TargetTriple(F.getParent()->getTargetTriple());
    unsigned VAArgBase =
        (TargetTriple.getArch() == llvm::Triple::ppc64) ? 48 : 32;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
      bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t ArgAlign = CS.getParamAlignment(ArgNo + 1);
        if (ArgAlign < 8)
          ArgAlign = 8;
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(RealTy, IRB,
                                                  VAArgOffset - VAArgBase);
          IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
                           ArgSize, kShadowTLSAlignment);
        }
        VAArgOffset += alignTo(ArgSize, 8);
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t ArgAlign = 8;
        // ... vectors and i128 arrays are aligned to 16 bytes; sub-8-byte
        //     arguments are adjusted for big-endian targets ...
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(A->getType(), IRB,
                                           VAArgOffset - VAArgBase);
          IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, 8);
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
                                                VAArgOffset - VAArgBase);
    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
    // a new class member: it is the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, /* alignment */ 8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
    Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                    VAArgSize);

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
          MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
    }
  }
};
/// \brief A no-op implementation of VarArgHelper for unsupported targets.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on a few targets; false positives are
  // possible on the others.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
           TargetTriple.getArch() == llvm::Triple::mips64el)
    return new VarArgMIPS64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::aarch64)
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
           TargetTriple.getArch() == llvm::Triple::ppc64le)
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

bool MemorySanitizer::runOnFunction(Function &F) {
  if (&F == MsanCtorFunction)
    return false;
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes: the function will write to and
  // read from shadow memory after instrumentation.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
      .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}
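// Usage sketch (assumption, not part of this file): with the legacy pass
// manager the instrumentation can be exercised directly from the command
// line, e.g.
//   opt -msan -msan-track-origins=1 -msan-keep-going=1 -S in.ll -o out.ll
// while a normal build reaches this pass through clang -fsanitize=memory,
// which also links the MSan runtime that provides the __msan_* callbacks
// declared in initializeCallbacks().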
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::ZeroOrMore, cl::values(clEnumValN(DefaultIT,"arm-default-it","Generate IT block based on arch"), clEnumValN(RestrictedIT,"arm-restrict-it","Disallow deprecated IT based on ARMv8"), clEnumValN(NoRestrictedIT,"arm-no-restrict-it","Allow IT blocks based on ARMv7")))
Return a value (possibly void), from a function.
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void push_back(const T &Elt)
unsigned Log2_32_Ceil(uint32_t Value)
Log2_32_Ceil - This function returns the ceil log base 2 of the specified value, 32 if the value is z...
A parsed version of the target data layout string in and methods for querying it. ...
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
BasicBlock::iterator GetInsertPoint() const
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM Argument representation.
Base class for instruction visitors.
Value * getAggregateOperand()
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
ArrayRef< unsigned > getIndices() const
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
A Module instance is used to store all the information related to an LLVM module. ...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
void setOrdering(AtomicOrdering Ordering)
Set the ordering constraint on this RMW.
AtomicOrdering getSuccessOrdering() const
Returns the ordering constraint on this cmpxchg.
Same, but only replaced by something equivalent.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
INITIALIZE_PASS_BEGIN(MemorySanitizer,"msan","MemorySanitizer: detects uninitialized reads.", false, false) INITIALIZE_PASS_END(MemorySanitizer
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Type * getSequentialElementType() const
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
This class represents zero extension of integer types.
unsigned getNumOperands() const
static const unsigned kRetvalTLSSize
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
This class represents a function call, abstracting a target machine's calling convention.
void setOrdering(AtomicOrdering Ordering)
Set the ordering constraint on this load.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
bool isSigned() const
Determine if this instruction is using a signed comparison.
Like Internal, but omit from symbol table.
This instruction constructs a fixed permutation of two input vectors.
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
Externally visible function.
A raw_ostream that writes to an SmallVector or SmallString.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
This class wraps the llvm.memset intrinsic.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
This class represents a sign extension of integer types.
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
An instruction for reading from memory.
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
const std::string & getTargetTriple() const
Get the target triple which is a string describing the target host.
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Type * getPointerElementType() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
StringRef getName() const
Return a constant reference to the value's name.
void removeAttributes(unsigned i, AttributeSet Attrs)
removes the attributes from the list of attributes.
Value * CreateNot(Value *V, const Twine &Name="")
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align, bool isVolatile=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
AllocaInst * CreateAlloca(Type *Ty, Value *ArraySize=nullptr, const Twine &Name="")
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
This class represents the LLVM 'select' instruction.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
This is the base class for all instructions that perform data casts.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
'undef' values are things that do not have specified contents.
This class wraps the llvm.memmove intrinsic.
Class to represent struct types.
Type * getArrayElementType() const
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
ValTy * getCalledValue() const
getCalledValue - Return the pointer to function that is being called.
unsigned getNumArgOperands() const
Return the number of call arguments.
Instruction * getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
bool isCall() const
isCall - true if a CallInst is enclosed.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
void setName(const Twine &Name)
Change the name of the value.
Type * getVectorElementType() const
This class represents a cast from a pointer to an integer.
AtomicOrdering
Atomic ordering for LLVM's memory model.
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Class to represent function types.
AtomicOrdering getOrdering() const
Returns the ordering constraint on this RMW.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
This represents the llvm.va_start intrinsic.
static const char *const kMsanInitName
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
Class to represent array types.
This instruction compares its operands according to the predicate given to the constructor.
This class represents a no-op cast from one type to another.
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
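A sketch of constructing a function type with this method; the 'void (i8*, i64)' shape and the helper name are only examples.
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// Build the type 'void (i8*, i64)'.
static FunctionType *makeHookType(LLVMContext &C) {
  Type *ArgTys[] = {Type::getInt8PtrTy(C), Type::getInt64Ty(C)};
  return FunctionType::get(Type::getVoidTy(C), ArgTys, /*isVarArg=*/false);
}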
Value * getInsertedValueOperand()
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
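A sketch of materializing an overloaded intrinsic declaration; llvm.ctlz on i64 is just an example choice, not something this pass emits.
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Get or create a declaration of 'llvm.ctlz.i64' in M.
static Function *getCtlzI64(Module &M) {
  Type *I64 = Type::getInt64Ty(M.getContext());
  return Intrinsic::getDeclaration(&M, Intrinsic::ctlz, {I64});
}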
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
An instruction for storing to memory.
bool isArrayTy() const
True if this is an instance of ArrayType.
static const unsigned kParamTLSSize
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
Value * CreateInBoundsGEP(Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
const char * getOpcodeName() const
Type * getScalarType() const LLVM_READONLY
If this is a vector type, return the element type, otherwise return 'this'.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
This class represents a truncation of integer types.
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block...
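A sketch of repositioning a builder at the end of a block before emitting code there; appendStoreZero and its arguments are illustrative.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Append 'store i32 0, i32* Slot' at the end of BB.
static void appendStoreZero(IRBuilder<> &IRB, BasicBlock *BB, Value *Slot) {
  IRB.SetInsertPoint(BB);
  IRB.CreateStore(IRB.getInt32(0), Slot);
}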
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
static const unsigned kMinOriginAlignment
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isInvoke() const
isInvoke - true if a InvokeInst is enclosed.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
LoadInst * CreateLoad(Value *Ptr, const char *Name)
bool isX86_MMXTy() const
Return true if this is X86 MMX.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
initializer< Ty > init(const Ty &Val)
This instruction inserts a single (scalar) element into a VectorType value.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
FunctionType * getFunctionType() const
Constant * getOrInsertFunction(StringRef Name, FunctionType *T, AttributeSet AttributeList)
Look up the specified function in the module symbol table.
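A sketch of declaring a runtime hook through this API; the name __my_hook is hypothetical and not one of MSan's callbacks.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Declare (or look up) 'void __my_hook(i8*)'.
static Constant *declareHook(Module &M) {
  LLVMContext &C = M.getContext();
  FunctionType *FT =
      FunctionType::get(Type::getVoidTy(C), {Type::getInt8PtrTy(C)}, false);
  return M.getOrInsertFunction("__my_hook", FT);
}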
LLVM Basic Block Representation.
The instances of the Type class are immutable: once they are created, they are never changed...
unsigned getArgumentNo(Value::const_user_iterator I) const
Given a value use iterator, returns the argument that corresponds to it.
This is an important class for using LLVM in a threaded context.
bool isVectorTy() const
True if this is an instance of VectorType.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
This is an important base class in LLVM.
const Value * getCondition() const
Resume the propagation of an exception.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
unsigned getAlignment() const
Return the alignment of the memory that is being allocated by the instruction.
bool isZeroValue() const
Return true if the value is negative zero or null value.
Represent the analysis usage information of a pass.
uint16_t getParamAlignment(uint16_t i) const
Extract the alignment for a call or parameter (0=unknown).
This instruction compares its operands according to the predicate given to the constructor.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool doesNotAccessMemory() const
Determine if the call does not access memory.
FunctionPass class - This class is used to implement most global optimizations.
static const unsigned kShadowTLSAlignment
Value * getOperand(unsigned i) const
Value * getPointerOperand()
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
self_iterator getIterator()
Class to represent integer types.
Predicate getPredicate() const
Return the predicate for this instruction.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
This class represents a cast from an integer to a pointer.
bool isPointerTy() const
True if this is an instance of PointerType.
Comdat * getOrInsertComdat(StringRef Name)
Return the Comdat in the module with the specified name.
static std::string itostr(int64_t X)
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
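A sketch of producing "unlikely" branch weights of the kind sanitizer checks attach to their slow path; the 1:100000 ratio is only an example value.
#include "llvm/IR/MDBuilder.h"
using namespace llvm;

// Metadata saying the true edge is taken roughly 1 time in 100000.
static MDNode *coldBranchWeights(LLVMContext &C) {
  return MDBuilder(C).createBranchWeights(1, 100000);
}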
const Value * getTrueValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Triple - Helper class for working with autoconf configuration names.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
std::pair< Function *, Function * > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef())
Creates sanitizer constructor function, and calls sanitizer's init function from it.
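A sketch, with illustrative names, of how a pass can use this helper to build its module constructor; the real MSan constructor and init names are not the ones shown here.
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

// Build 'void my.module_ctor()' whose body simply calls 'void __my_init()'.
static std::pair<Function *, Function *> makeCtor(Module &M) {
  return createSanitizerCtorAndInitFunctions(M, "my.module_ctor", "__my_init",
                                             /*InitArgTypes=*/{},
                                             /*InitArgs=*/{});
}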
BinaryOps getOpcode() const
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Iterator for intrusive lists based on ilist_node.
This is the shared class of boolean and integer constants.
InstrTy * getInstruction() const
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type...
bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI=nullptr)
Remove all blocks that can not be reached from the function's entry.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Value * CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, const Twine &Name="")
Provides information about what library functions are available for the current target.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than ""this number of checks and origin stores, use callbacks instead of ""inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
TerminatorInst * SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
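A sketch of guarding newly emitted code with a conditional branch via this utility; emitGuarded is an illustrative wrapper, not code from this pass.
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

// Create 'if (Cond) { <new code> }' immediately before Before.
static void emitGuarded(Value *Cond, Instruction *Before) {
  TerminatorInst *ThenTerm =
      SplitBlockAndInsertIfThen(Cond, Before, /*Unreachable=*/false);
  IRBuilder<> IRB(ThenTerm); // builder positioned inside the new 'then' block
  (void)IRB;                 // ... emit the guarded instructions here ...
}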
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
This class wraps the llvm.memcpy intrinsic.
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
AtomicOrdering getOrdering() const
Returns the ordering effect of this fence.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
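A sketch of registering a constructor function so it runs at program start-up; priority 0 follows the usual sanitizer convention but is an assumption here.
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

// Have Ctor run before ordinary static initializers (priority 0).
static void registerCtor(Module &M, Function *Ctor) {
  appendToGlobalCtors(M, Ctor, /*Priority=*/0);
}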
const BasicBlock & getEntryBlock() const
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static const size_t kNumberOfAccessSizes
static GlobalVariable * createPrivateNonConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static int alignTo(int Num, int PowOf2)
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
Class to represent vector types.
Class for arbitrary precision integers.
bool isIntegerTy() const
True if this is an instance of IntegerType.
BasicBlock * getSinglePredecessor()
Return the predecessor of this block if it has a single predecessor block.
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
unsigned getVectorNumElements() const
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
The C convention as implemented on Windows/x86-64.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(false))
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool isAggregateType() const
Return true if the type is an aggregate type.
CallInst * CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
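A usage sketch with an unknown-alignment default; copyBytes is an illustrative helper.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Copy Size bytes from Src to Dst; alignment 1 is safe when nothing better is known.
static void copyBytes(IRBuilder<> &IRB, Value *Dst, Value *Src, uint64_t Size) {
  IRB.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}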
unsigned getAlignment() const
Return the alignment of the access that is being performed.
bool paramHasAttr(unsigned i, Attribute::AttrKind Kind) const
Return true if the call or the callee has the given attribute.
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
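A sketch of the checked-downcast pattern this template enables; the helper is illustrative.
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Return the pointer a load reads from, or nullptr if I is not a load.
static Value *loadPointerOrNull(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  return nullptr;
}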
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT)
InlineAsm::get - Return the specified uniqued inline asm string.
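A sketch of a side-effecting, empty asm statement built with this API (a compiler memory barrier is one conventional use); makeBarrier is illustrative.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InlineAsm.h"
using namespace llvm;

// 'asm volatile("" ::: "memory")' expressed as an InlineAsm value.
static InlineAsm *makeBarrier(LLVMContext &C) {
  FunctionType *FT = FunctionType::get(Type::getVoidTy(C), /*isVarArg=*/false);
  return InlineAsm::get(FT, /*AsmString=*/"", /*Constraints=*/"~{memory}",
                        /*hasSideEffects=*/true);
}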
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(false))
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
static const unsigned kOriginSize
iterator_range< df_iterator< T > > depth_first(const T &G)
bool isUnsigned() const
Determine if this instruction is using an unsigned comparison.
This represents the llvm.va_copy intrinsic.
LoadInst * CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name)
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
void setSuccessOrdering(AtomicOrdering Ordering)
Set the ordering constraint on this cmpxchg.
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FunctionPass * createMemorySanitizerPass(int TrackOrigins=0, bool Recover=false)
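A sketch of adding the pass to a legacy pass manager, assuming the llvm/Transforms/Instrumentation.h header of this era; the TrackOrigins value shown is an arbitrary example.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Instrumentation.h"
using namespace llvm;

// Instrument with MSan, tracking one level of origins, aborting on reports.
static void addMsanPass(legacy::PassManagerBase &PM) {
  PM.add(createMemorySanitizerPass(/*TrackOrigins=*/1, /*Recover=*/false));
}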
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
uint64_t getTypeSizeInBits(Type *Ty) const
Returns the number of bits necessary to hold the specified type (for example, i1 -> 1, i8 -> 8, i36 -> 36).
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
const Value * getFalseValue() const
StringRef - Represent a constant reference to a string, i.e.
iterator getFirstInsertionPt()
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
static const char *const kMsanModuleCtorName
const BasicBlock * getParent() const
iterator_range< arg_iterator > args()
A wrapper class for inspecting calls to intrinsic functions.
LLVMContext & getContext() const
Get the global data context.
bool isVoidTy() const
Return true if this is 'void'.
an instruction to allocate memory on the stack
This instruction inserts a struct field of array element value into an aggregate value.