Bug Summary

File: lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
Warning: line 202, column 13
Called C++ object pointer is null

Annotated Source Code

1//===-- EfficiencySanitizer.cpp - performance tuner -----------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of EfficiencySanitizer, a family of performance tuners
11// that detects multiple performance issues via separate sub-tools.
12//
13// The instrumentation phase is straightforward:
14// - Take action on every memory access: either inlined instrumentation,
15// or inserted calls to our run-time library.
16// - Optimizations may apply to avoid instrumenting some of the accesses.
17// - Turn mem{set,cpy,move} intrinsics into library calls.
18// The rest is handled by the run-time library.
19//===----------------------------------------------------------------------===//
20
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <cassert>
38
39using namespace llvm;
40
41#define DEBUG_TYPE "esan"
42
43// The tool type must be just one of these ClTool* options, as the tools
44// cannot be combined due to shadow memory constraints.
45static cl::opt<bool>
46 ClToolCacheFrag("esan-cache-frag", cl::init(false),
47 cl::desc("Detect data cache fragmentation"), cl::Hidden);
48static cl::opt<bool>
49 ClToolWorkingSet("esan-working-set", cl::init(false),
50 cl::desc("Measure the working set size"), cl::Hidden);
51// Each new tool will get its own opt flag here.
52// These are converted to EfficiencySanitizerOptions for use
53// in the code.
54
55static cl::opt<bool> ClInstrumentLoadsAndStores(
56 "esan-instrument-loads-and-stores", cl::init(true),
57 cl::desc("Instrument loads and stores"), cl::Hidden);
58static cl::opt<bool> ClInstrumentMemIntrinsics(
59 "esan-instrument-memintrinsics", cl::init(true),
60 cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
61static cl::opt<bool> ClInstrumentFastpath(
62 "esan-instrument-fastpath", cl::init(true),
63 cl::desc("Instrument fastpath"), cl::Hidden);
64static cl::opt<bool> ClAuxFieldInfo(
65 "esan-aux-field-info", cl::init(true),
66 cl::desc("Generate binary with auxiliary struct field information"),
67 cl::Hidden);
68
69// Experiments show that the performance difference can be 2x or more,
70// and accuracy loss is typically negligible, so we turn this on by default.
71static cl::opt<bool> ClAssumeIntraCacheLine(
72 "esan-assume-intra-cache-line", cl::init(true),
73 cl::desc("Assume each memory access touches just one cache line, for "
74 "better performance but with a potential loss of accuracy."),
75 cl::Hidden);
76
77STATISTIC(NumInstrumentedLoads, "Number of instrumented loads")static llvm::Statistic NumInstrumentedLoads = {"esan", "NumInstrumentedLoads"
, "Number of instrumented loads", {0}, false}
;
78STATISTIC(NumInstrumentedStores, "Number of instrumented stores")static llvm::Statistic NumInstrumentedStores = {"esan", "NumInstrumentedStores"
, "Number of instrumented stores", {0}, false}
;
79STATISTIC(NumFastpaths, "Number of instrumented fastpaths")static llvm::Statistic NumFastpaths = {"esan", "NumFastpaths"
, "Number of instrumented fastpaths", {0}, false}
;
80STATISTIC(NumAccessesWithIrregularSize,static llvm::Statistic NumAccessesWithIrregularSize = {"esan"
, "NumAccessesWithIrregularSize", "Number of accesses with a size outside our targeted callout sizes"
, {0}, false}
81 "Number of accesses with a size outside our targeted callout sizes")static llvm::Statistic NumAccessesWithIrregularSize = {"esan"
, "NumAccessesWithIrregularSize", "Number of accesses with a size outside our targeted callout sizes"
, {0}, false}
;
82STATISTIC(NumIgnoredStructs, "Number of ignored structs")static llvm::Statistic NumIgnoredStructs = {"esan", "NumIgnoredStructs"
, "Number of ignored structs", {0}, false}
;
83STATISTIC(NumIgnoredGEPs, "Number of ignored GEP instructions")static llvm::Statistic NumIgnoredGEPs = {"esan", "NumIgnoredGEPs"
, "Number of ignored GEP instructions", {0}, false}
;
84STATISTIC(NumInstrumentedGEPs, "Number of instrumented GEP instructions")static llvm::Statistic NumInstrumentedGEPs = {"esan", "NumInstrumentedGEPs"
, "Number of instrumented GEP instructions", {0}, false}
;
85STATISTIC(NumAssumedIntraCacheLine,static llvm::Statistic NumAssumedIntraCacheLine = {"esan", "NumAssumedIntraCacheLine"
, "Number of accesses assumed to be intra-cache-line", {0}, false
}
86 "Number of accesses assumed to be intra-cache-line")static llvm::Statistic NumAssumedIntraCacheLine = {"esan", "NumAssumedIntraCacheLine"
, "Number of accesses assumed to be intra-cache-line", {0}, false
}
;
87
88static const uint64_t EsanCtorAndDtorPriority = 0;
89static const char *const EsanModuleCtorName = "esan.module_ctor";
90static const char *const EsanModuleDtorName = "esan.module_dtor";
91static const char *const EsanInitName = "__esan_init";
92static const char *const EsanExitName = "__esan_exit";
93
94// We need to specify the tool to the runtime earlier than
95// the ctor is called in some cases, so we set a global variable.
96static const char *const EsanWhichToolName = "__esan_which_tool";
97
98// We must keep these Shadow* constants consistent with the esan runtime.
99// FIXME: Try to place these shadow constants, the names of the __esan_*
100// interface functions, and the ToolType enum into a header shared between
101// llvm and compiler-rt.
102struct ShadowMemoryParams {
103 uint64_t ShadowMask;
104 uint64_t ShadowOffs[3];
105};
106
107static const ShadowMemoryParams ShadowParams47 = {
108 0x00000fffffffffffull,
109 {
110 0x0000130000000000ull, 0x0000220000000000ull, 0x0000440000000000ull,
111 }};
112
113static const ShadowMemoryParams ShadowParams40 = {
114 0x0fffffffffull,
115 {
116 0x1300000000ull, 0x2200000000ull, 0x4400000000ull,
117 }};
118
119// This array is indexed by the ToolType enum.
120static const int ShadowScale[] = {
121 0, // ESAN_None.
122 2, // ESAN_CacheFrag: 4B:1B, so 4 to 1 == >>2.
123 6, // ESAN_WorkingSet: 64B:1B, so 64 to 1 == >>6.
124};
125
126// MaxStructCounterNameSize is a soft size limit to avoid insanely long
127// names for those extremely large structs.
128static const unsigned MaxStructCounterNameSize = 512;
129
130namespace {
131
132static EfficiencySanitizerOptions
133OverrideOptionsFromCL(EfficiencySanitizerOptions Options) {
134 if (ClToolCacheFrag)
135 Options.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag;
136 else if (ClToolWorkingSet)
137 Options.ToolType = EfficiencySanitizerOptions::ESAN_WorkingSet;
138
139 // Direct opt invocation with no params will have the default ESAN_None.
140 // We run the default tool in that case.
141 if (Options.ToolType == EfficiencySanitizerOptions::ESAN_None)
142 Options.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag;
143
144 return Options;
145}
146
147// Create a constant for Str so that we can pass it to the run-time lib.
148static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
149 bool AllowMerging) {
150 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
151 // We use private linkage for module-local strings. If they can be merged
152 // with another one, we set the unnamed_addr attribute.
153 GlobalVariable *GV =
154 new GlobalVariable(M, StrConst->getType(), true,
155 GlobalValue::PrivateLinkage, StrConst, "");
156 if (AllowMerging)
157 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
158 GV->setAlignment(1); // Strings may not be merged w/o setting align 1.
159 return GV;
160}
161
162/// EfficiencySanitizer: instrument each module to find performance issues.
163class EfficiencySanitizer : public ModulePass {
164public:
165 EfficiencySanitizer(
166 const EfficiencySanitizerOptions &Opts = EfficiencySanitizerOptions())
167 : ModulePass(ID), Options(OverrideOptionsFromCL(Opts)) {}
168 StringRef getPassName() const override;
169 void getAnalysisUsage(AnalysisUsage &AU) const override;
170 bool runOnModule(Module &M) override;
171 static char ID;
172
173private:
174 bool initOnModule(Module &M);
175 void initializeCallbacks(Module &M);
176 bool shouldIgnoreStructType(StructType *StructTy);
177 void createStructCounterName(
178 StructType *StructTy, SmallString<MaxStructCounterNameSize> &NameStr);
179 void createCacheFragAuxGV(
180 Module &M, const DataLayout &DL, StructType *StructTy,
181 GlobalVariable *&TypeNames, GlobalVariable *&Offsets, GlobalVariable *&Size);
182 GlobalVariable *createCacheFragInfoGV(Module &M, const DataLayout &DL,
183 Constant *UnitName);
184 Constant *createEsanInitToolInfoArg(Module &M, const DataLayout &DL);
185 void createDestructor(Module &M, Constant *ToolInfoArg);
186 bool runOnFunction(Function &F, Module &M);
187 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
188 bool instrumentMemIntrinsic(MemIntrinsic *MI);
189 bool instrumentGetElementPtr(Instruction *I, Module &M);
190 bool insertCounterUpdate(Instruction *I, StructType *StructTy,
191 unsigned CounterIdx);
192 unsigned getFieldCounterIdx(StructType *StructTy) {
193 return 0;
194 }
195 unsigned getArrayCounterIdx(StructType *StructTy) {
196 return StructTy->getNumElements();
197 }
198 unsigned getStructCounterSize(StructType *StructTy) {
199 // The struct counter array includes:
200 // - one counter for each struct field,
201 // - one counter for the struct access within an array.
202 return (StructTy->getNumElements()/*field*/ + 1/*array*/);
16
Called C++ object pointer is null
203 }
204 bool shouldIgnoreMemoryAccess(Instruction *I);
205 int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
206 Value *appToShadow(Value *Shadow, IRBuilder<> &IRB);
207 bool instrumentFastpath(Instruction *I, const DataLayout &DL, bool IsStore,
208 Value *Addr, unsigned Alignment);
209 // Each tool has its own fastpath routine:
210 bool instrumentFastpathCacheFrag(Instruction *I, const DataLayout &DL,
211 Value *Addr, unsigned Alignment);
212 bool instrumentFastpathWorkingSet(Instruction *I, const DataLayout &DL,
213 Value *Addr, unsigned Alignment);
214
215 EfficiencySanitizerOptions Options;
216 LLVMContext *Ctx;
217 Type *IntptrTy;
218 // Our slowpath involves callouts to the runtime library.
219 // Access sizes are powers of two: 1, 2, 4, 8, 16.
220 static const size_t NumberOfAccessSizes = 5;
221 Function *EsanAlignedLoad[NumberOfAccessSizes];
222 Function *EsanAlignedStore[NumberOfAccessSizes];
223 Function *EsanUnalignedLoad[NumberOfAccessSizes];
224 Function *EsanUnalignedStore[NumberOfAccessSizes];
225 // For irregular sizes of any alignment:
226 Function *EsanUnalignedLoadN, *EsanUnalignedStoreN;
227 Function *MemmoveFn, *MemcpyFn, *MemsetFn;
228 Function *EsanCtorFunction;
229 Function *EsanDtorFunction;
230 // Remember the counter variable for each struct type to avoid
231 // recomputing the variable name later during instrumentation.
232 std::map<Type *, GlobalVariable *> StructTyMap;
233 ShadowMemoryParams ShadowParams;
234};
235} // namespace
236
237char EfficiencySanitizer::ID = 0;
238INITIALIZE_PASS_BEGIN(static void *initializeEfficiencySanitizerPassOnce(PassRegistry
&Registry) {
239 EfficiencySanitizer, "esan",static void *initializeEfficiencySanitizerPassOnce(PassRegistry
&Registry) {
240 "EfficiencySanitizer: finds performance issues.", false, false)static void *initializeEfficiencySanitizerPassOnce(PassRegistry
&Registry) {
241INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
242INITIALIZE_PASS_END(PassInfo *PI = new PassInfo( "EfficiencySanitizer: finds performance issues."
, "esan", &EfficiencySanitizer::ID, PassInfo::NormalCtor_t
(callDefaultCtor<EfficiencySanitizer>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeEfficiencySanitizerPassFlag; void llvm::initializeEfficiencySanitizerPass
(PassRegistry &Registry) { llvm::call_once(InitializeEfficiencySanitizerPassFlag
, initializeEfficiencySanitizerPassOnce, std::ref(Registry));
}
243 EfficiencySanitizer, "esan",PassInfo *PI = new PassInfo( "EfficiencySanitizer: finds performance issues."
, "esan", &EfficiencySanitizer::ID, PassInfo::NormalCtor_t
(callDefaultCtor<EfficiencySanitizer>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeEfficiencySanitizerPassFlag; void llvm::initializeEfficiencySanitizerPass
(PassRegistry &Registry) { llvm::call_once(InitializeEfficiencySanitizerPassFlag
, initializeEfficiencySanitizerPassOnce, std::ref(Registry));
}
244 "EfficiencySanitizer: finds performance issues.", false, false)PassInfo *PI = new PassInfo( "EfficiencySanitizer: finds performance issues."
, "esan", &EfficiencySanitizer::ID, PassInfo::NormalCtor_t
(callDefaultCtor<EfficiencySanitizer>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeEfficiencySanitizerPassFlag; void llvm::initializeEfficiencySanitizerPass
(PassRegistry &Registry) { llvm::call_once(InitializeEfficiencySanitizerPassFlag
, initializeEfficiencySanitizerPassOnce, std::ref(Registry));
}
245
246StringRef EfficiencySanitizer::getPassName() const {
247 return "EfficiencySanitizer";
248}
249
250void EfficiencySanitizer::getAnalysisUsage(AnalysisUsage &AU) const {
251 AU.addRequired<TargetLibraryInfoWrapperPass>();
252}
253
254ModulePass *
255llvm::createEfficiencySanitizerPass(const EfficiencySanitizerOptions &Options) {
256 return new EfficiencySanitizer(Options);
257}
258
259void EfficiencySanitizer::initializeCallbacks(Module &M) {
260 IRBuilder<> IRB(M.getContext());
261 // Initialize the callbacks.
262 for (size_t Idx = 0; Idx < NumberOfAccessSizes; ++Idx) {
263 const unsigned ByteSize = 1U << Idx;
264 std::string ByteSizeStr = utostr(ByteSize);
265 // We'll inline the most common (i.e., aligned and frequent sizes)
266 // load + store instrumentation: these callouts are for the slowpath.
267 SmallString<32> AlignedLoadName("__esan_aligned_load" + ByteSizeStr);
268 EsanAlignedLoad[Idx] =
269 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
270 AlignedLoadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
271 SmallString<32> AlignedStoreName("__esan_aligned_store" + ByteSizeStr);
272 EsanAlignedStore[Idx] =
273 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
274 AlignedStoreName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
275 SmallString<32> UnalignedLoadName("__esan_unaligned_load" + ByteSizeStr);
276 EsanUnalignedLoad[Idx] =
277 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
278 UnalignedLoadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
279 SmallString<32> UnalignedStoreName("__esan_unaligned_store" + ByteSizeStr);
280 EsanUnalignedStore[Idx] =
281 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
282 UnalignedStoreName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
283 }
284 EsanUnalignedLoadN = checkSanitizerInterfaceFunction(
285 M.getOrInsertFunction("__esan_unaligned_loadN", IRB.getVoidTy(),
286 IRB.getInt8PtrTy(), IntptrTy, nullptr));
287 EsanUnalignedStoreN = checkSanitizerInterfaceFunction(
288 M.getOrInsertFunction("__esan_unaligned_storeN", IRB.getVoidTy(),
289 IRB.getInt8PtrTy(), IntptrTy, nullptr));
290 MemmoveFn = checkSanitizerInterfaceFunction(
291 M.getOrInsertFunction("memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
292 IRB.getInt8PtrTy(), IntptrTy, nullptr));
293 MemcpyFn = checkSanitizerInterfaceFunction(
294 M.getOrInsertFunction("memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
295 IRB.getInt8PtrTy(), IntptrTy, nullptr));
296 MemsetFn = checkSanitizerInterfaceFunction(
297 M.getOrInsertFunction("memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
298 IRB.getInt32Ty(), IntptrTy, nullptr));
299}
300
301bool EfficiencySanitizer::shouldIgnoreStructType(StructType *StructTy) {
302 if (StructTy == nullptr || StructTy->isOpaque() /* no struct body */)
303 return true;
304 return false;
305}
306
307void EfficiencySanitizer::createStructCounterName(
308 StructType *StructTy, SmallString<MaxStructCounterNameSize> &NameStr) {
309 // Append NumFields and field type ids to avoid struct conflicts
310 // with the same name but different fields.
311 if (StructTy->hasName())
312 NameStr += StructTy->getName();
313 else
314 NameStr += "struct.anon";
315 // We allow the actual size of the StructCounterName to be larger than
316 // MaxStructCounterNameSize and append $NumFields and at least one
317 // field type id.
318 // Append $NumFields.
319 NameStr += "$";
320 Twine(StructTy->getNumElements()).toVector(NameStr);
321 // Append struct field type ids in the reverse order.
322 for (int i = StructTy->getNumElements() - 1; i >= 0; --i) {
323 NameStr += "$";
324 Twine(StructTy->getElementType(i)->getTypeID()).toVector(NameStr);
325 if (NameStr.size() >= MaxStructCounterNameSize)
326 break;
327 }
328 if (StructTy->isLiteral()) {
329 // End with $ for literal struct.
330 NameStr += "$";
331 }
332}
333
334// Create global variables with auxiliary information (e.g., struct field size,
335// offset, and type name) for better user report.
336void EfficiencySanitizer::createCacheFragAuxGV(
337 Module &M, const DataLayout &DL, StructType *StructTy,
338 GlobalVariable *&TypeName, GlobalVariable *&Offset,
339 GlobalVariable *&Size) {
340 auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
341 auto *Int32Ty = Type::getInt32Ty(*Ctx);
342 // FieldTypeName.
343 auto *TypeNameArrayTy = ArrayType::get(Int8PtrTy, StructTy->getNumElements());
344 TypeName = new GlobalVariable(M, TypeNameArrayTy, true,
345 GlobalVariable::InternalLinkage, nullptr);
346 SmallVector<Constant *, 16> TypeNameVec;
347 // FieldOffset.
348 auto *OffsetArrayTy = ArrayType::get(Int32Ty, StructTy->getNumElements());
349 Offset = new GlobalVariable(M, OffsetArrayTy, true,
350 GlobalVariable::InternalLinkage, nullptr);
351 SmallVector<Constant *, 16> OffsetVec;
352 // FieldSize
353 auto *SizeArrayTy = ArrayType::get(Int32Ty, StructTy->getNumElements());
354 Size = new GlobalVariable(M, SizeArrayTy, true,
355 GlobalVariable::InternalLinkage, nullptr);
356 SmallVector<Constant *, 16> SizeVec;
357 for (unsigned i = 0; i < StructTy->getNumElements(); ++i) {
358 Type *Ty = StructTy->getElementType(i);
359 std::string Str;
360 raw_string_ostream StrOS(Str);
361 Ty->print(StrOS);
362 TypeNameVec.push_back(
363 ConstantExpr::getPointerCast(
364 createPrivateGlobalForString(M, StrOS.str(), true),
365 Int8PtrTy));
366 OffsetVec.push_back(
367 ConstantInt::get(Int32Ty,
368 DL.getStructLayout(StructTy)->getElementOffset(i)));
369 SizeVec.push_back(ConstantInt::get(Int32Ty,
370 DL.getTypeAllocSize(Ty)));
371 }
372 TypeName->setInitializer(ConstantArray::get(TypeNameArrayTy, TypeNameVec));
373 Offset->setInitializer(ConstantArray::get(OffsetArrayTy, OffsetVec));
374 Size->setInitializer(ConstantArray::get(SizeArrayTy, SizeVec));
375}
376
377// Create the global variable for the cache-fragmentation tool.
378GlobalVariable *EfficiencySanitizer::createCacheFragInfoGV(
379 Module &M, const DataLayout &DL, Constant *UnitName) {
380 assert(Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag)((Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag
) ? static_cast<void> (0) : __assert_fail ("Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 380, __PRETTY_FUNCTION__))
;
381
382 auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
383 auto *Int8PtrPtrTy = Int8PtrTy->getPointerTo();
384 auto *Int32Ty = Type::getInt32Ty(*Ctx);
385 auto *Int32PtrTy = Type::getInt32PtrTy(*Ctx);
386 auto *Int64Ty = Type::getInt64Ty(*Ctx);
387 auto *Int64PtrTy = Type::getInt64PtrTy(*Ctx);
388 // This structure should be kept consistent with the StructInfo struct
389 // in the runtime library.
390 // struct StructInfo {
391 // const char *StructName;
392 // u32 Size;
393 // u32 NumFields;
394 // u32 *FieldOffset; // auxiliary struct field info.
395 // u32 *FieldSize; // auxiliary struct field info.
396 // const char **FieldTypeName; // auxiliary struct field info.
397 // u64 *FieldCounters;
398 // u64 *ArrayCounter;
399 // };
400 auto *StructInfoTy =
401 StructType::get(Int8PtrTy, Int32Ty, Int32Ty, Int32PtrTy, Int32PtrTy,
402 Int8PtrPtrTy, Int64PtrTy, Int64PtrTy, nullptr);
403 auto *StructInfoPtrTy = StructInfoTy->getPointerTo();
404 // This structure should be kept consistent with the CacheFragInfo struct
405 // in the runtime library.
406 // struct CacheFragInfo {
407 // const char *UnitName;
408 // u32 NumStructs;
409 // StructInfo *Structs;
410 // };
411 auto *CacheFragInfoTy =
412 StructType::get(Int8PtrTy, Int32Ty, StructInfoPtrTy, nullptr);
413
414 std::vector<StructType *> Vec = M.getIdentifiedStructTypes();
415 unsigned NumStructs = 0;
416 SmallVector<Constant *, 16> Initializers;
417
418 for (auto &StructTy : Vec) {
419 if (shouldIgnoreStructType(StructTy)) {
420 ++NumIgnoredStructs;
421 continue;
422 }
423 ++NumStructs;
424
425 // StructName.
426 SmallString<MaxStructCounterNameSize> CounterNameStr;
427 createStructCounterName(StructTy, CounterNameStr);
428 GlobalVariable *StructCounterName = createPrivateGlobalForString(
429 M, CounterNameStr, /*AllowMerging*/true);
430
431 // Counters.
432 // We create the counter array with StructCounterName and weak linkage
433 // so that the structs with the same name and layout from different
434 // compilation units will be merged into one.
435 auto *CounterArrayTy = ArrayType::get(Int64Ty,
436 getStructCounterSize(StructTy));
437 GlobalVariable *Counters =
438 new GlobalVariable(M, CounterArrayTy, false,
439 GlobalVariable::WeakAnyLinkage,
440 ConstantAggregateZero::get(CounterArrayTy),
441 CounterNameStr);
442
443 // Remember the counter variable for each struct type.
444 StructTyMap.insert(std::pair<Type *, GlobalVariable *>(StructTy, Counters));
445
446 // We pass the field type name array, offset array, and size array to
447 // the runtime for better reporting.
448 GlobalVariable *TypeName = nullptr, *Offset = nullptr, *Size = nullptr;
449 if (ClAuxFieldInfo)
450 createCacheFragAuxGV(M, DL, StructTy, TypeName, Offset, Size);
451
452 Constant *FieldCounterIdx[2];
453 FieldCounterIdx[0] = ConstantInt::get(Int32Ty, 0);
454 FieldCounterIdx[1] = ConstantInt::get(Int32Ty,
455 getFieldCounterIdx(StructTy));
456 Constant *ArrayCounterIdx[2];
457 ArrayCounterIdx[0] = ConstantInt::get(Int32Ty, 0);
458 ArrayCounterIdx[1] = ConstantInt::get(Int32Ty,
459 getArrayCounterIdx(StructTy));
460 Initializers.push_back(
461 ConstantStruct::get(
462 StructInfoTy,
463 ConstantExpr::getPointerCast(StructCounterName, Int8PtrTy),
464 ConstantInt::get(Int32Ty,
465 DL.getStructLayout(StructTy)->getSizeInBytes()),
466 ConstantInt::get(Int32Ty, StructTy->getNumElements()),
467 Offset == nullptr ? ConstantPointerNull::get(Int32PtrTy) :
468 ConstantExpr::getPointerCast(Offset, Int32PtrTy),
469 Size == nullptr ? ConstantPointerNull::get(Int32PtrTy) :
470 ConstantExpr::getPointerCast(Size, Int32PtrTy),
471 TypeName == nullptr ? ConstantPointerNull::get(Int8PtrPtrTy) :
472 ConstantExpr::getPointerCast(TypeName, Int8PtrPtrTy),
473 ConstantExpr::getGetElementPtr(CounterArrayTy, Counters,
474 FieldCounterIdx),
475 ConstantExpr::getGetElementPtr(CounterArrayTy, Counters,
476 ArrayCounterIdx),
477 nullptr));
478 }
479 // Structs.
480 Constant *StructInfo;
481 if (NumStructs == 0) {
482 StructInfo = ConstantPointerNull::get(StructInfoPtrTy);
483 } else {
484 auto *StructInfoArrayTy = ArrayType::get(StructInfoTy, NumStructs);
485 StructInfo = ConstantExpr::getPointerCast(
486 new GlobalVariable(M, StructInfoArrayTy, false,
487 GlobalVariable::InternalLinkage,
488 ConstantArray::get(StructInfoArrayTy, Initializers)),
489 StructInfoPtrTy);
490 }
491
492 auto *CacheFragInfoGV = new GlobalVariable(
493 M, CacheFragInfoTy, true, GlobalVariable::InternalLinkage,
494 ConstantStruct::get(CacheFragInfoTy,
495 UnitName,
496 ConstantInt::get(Int32Ty, NumStructs),
497 StructInfo,
498 nullptr));
499 return CacheFragInfoGV;
500}
501
502// Create the tool-specific argument passed to EsanInit and EsanExit.
503Constant *EfficiencySanitizer::createEsanInitToolInfoArg(Module &M,
504 const DataLayout &DL) {
505 // This structure contains tool-specific information about each compilation
506 // unit (module) and is passed to the runtime library.
507 GlobalVariable *ToolInfoGV = nullptr;
508
509 auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
510 // Compilation unit name.
511 auto *UnitName = ConstantExpr::getPointerCast(
512 createPrivateGlobalForString(M, M.getModuleIdentifier(), true),
513 Int8PtrTy);
514
515 // Create the tool-specific variable.
516 if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag)
517 ToolInfoGV = createCacheFragInfoGV(M, DL, UnitName);
518
519 if (ToolInfoGV != nullptr)
520 return ConstantExpr::getPointerCast(ToolInfoGV, Int8PtrTy);
521
522 // Create the null pointer if no tool-specific variable created.
523 return ConstantPointerNull::get(Int8PtrTy);
524}
525
526void EfficiencySanitizer::createDestructor(Module &M, Constant *ToolInfoArg) {
527 PointerType *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
528 EsanDtorFunction = Function::Create(FunctionType::get(Type::getVoidTy(*Ctx),
529 false),
530 GlobalValue::InternalLinkage,
531 EsanModuleDtorName, &M);
532 ReturnInst::Create(*Ctx, BasicBlock::Create(*Ctx, "", EsanDtorFunction));
533 IRBuilder<> IRB_Dtor(EsanDtorFunction->getEntryBlock().getTerminator());
534 Function *EsanExit = checkSanitizerInterfaceFunction(
535 M.getOrInsertFunction(EsanExitName, IRB_Dtor.getVoidTy(),
536 Int8PtrTy, nullptr));
537 EsanExit->setLinkage(Function::ExternalLinkage);
538 IRB_Dtor.CreateCall(EsanExit, {ToolInfoArg});
539 appendToGlobalDtors(M, EsanDtorFunction, EsanCtorAndDtorPriority);
540}
541
542bool EfficiencySanitizer::initOnModule(Module &M) {
543
544 Triple TargetTriple(M.getTargetTriple());
545 if (TargetTriple.getArch() == Triple::mips64 || TargetTriple.getArch() == Triple::mips64el)
546 ShadowParams = ShadowParams40;
547 else
548 ShadowParams = ShadowParams47;
549
550 Ctx = &M.getContext();
551 const DataLayout &DL = M.getDataLayout();
552 IRBuilder<> IRB(M.getContext());
553 IntegerType *OrdTy = IRB.getInt32Ty();
554 PointerType *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
555 IntptrTy = DL.getIntPtrType(M.getContext());
556 // Create the variable passed to EsanInit and EsanExit.
557 Constant *ToolInfoArg = createEsanInitToolInfoArg(M, DL);
558 // Constructor
559 // We specify the tool type both in the EsanWhichToolName global
560 // and as an arg to the init routine as a sanity check.
561 std::tie(EsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
562 M, EsanModuleCtorName, EsanInitName, /*InitArgTypes=*/{OrdTy, Int8PtrTy},
563 /*InitArgs=*/{
564 ConstantInt::get(OrdTy, static_cast<int>(Options.ToolType)),
565 ToolInfoArg});
566 appendToGlobalCtors(M, EsanCtorFunction, EsanCtorAndDtorPriority);
567
568 createDestructor(M, ToolInfoArg);
569
570 new GlobalVariable(M, OrdTy, true,
571 GlobalValue::WeakAnyLinkage,
572 ConstantInt::get(OrdTy,
573 static_cast<int>(Options.ToolType)),
574 EsanWhichToolName);
575
576 return true;
577}
578
579Value *EfficiencySanitizer::appToShadow(Value *Shadow, IRBuilder<> &IRB) {
580 // Shadow = ((App & Mask) + Offs) >> Scale
581 Shadow = IRB.CreateAnd(Shadow, ConstantInt::get(IntptrTy, ShadowParams.ShadowMask));
582 uint64_t Offs;
583 int Scale = ShadowScale[Options.ToolType];
584 if (Scale <= 2)
585 Offs = ShadowParams.ShadowOffs[Scale];
586 else
587 Offs = ShadowParams.ShadowOffs[0] << Scale;
588 Shadow = IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Offs));
589 if (Scale > 0)
590 Shadow = IRB.CreateLShr(Shadow, Scale);
591 return Shadow;
592}
593
594bool EfficiencySanitizer::shouldIgnoreMemoryAccess(Instruction *I) {
595 if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) {
596 // We'd like to know about cache fragmentation in vtable accesses and
597 // constant data references, so we do not currently ignore anything.
598 return false;
599 } else if (Options.ToolType == EfficiencySanitizerOptions::ESAN_WorkingSet) {
600 // TODO: the instrumentation disturbs the data layout on the stack, so we
601 // may want to add an option to ignore stack references (if we can
602 // distinguish them) to reduce overhead.
603 }
604 // TODO(bruening): future tools will be returning true for some cases.
605 return false;
606}
607
608bool EfficiencySanitizer::runOnModule(Module &M) {
609 bool Res = initOnModule(M);
610 initializeCallbacks(M);
611 for (auto &F : M) {
612 Res |= runOnFunction(F, M);
613 }
614 return Res;
615}
616
617bool EfficiencySanitizer::runOnFunction(Function &F, Module &M) {
618 // This is required to prevent instrumenting the call to __esan_init from
619 // within the module constructor.
620 if (&F == EsanCtorFunction)
621 return false;
622 SmallVector<Instruction *, 8> LoadsAndStores;
623 SmallVector<Instruction *, 8> MemIntrinCalls;
624 SmallVector<Instruction *, 8> GetElementPtrs;
625 bool Res = false;
626 const DataLayout &DL = M.getDataLayout();
627 const TargetLibraryInfo *TLI =
628 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
629
630 for (auto &BB : F) {
631 for (auto &Inst : BB) {
632 if ((isa<LoadInst>(Inst) || isa<StoreInst>(Inst) ||
633 isa<AtomicRMWInst>(Inst) || isa<AtomicCmpXchgInst>(Inst)) &&
634 !shouldIgnoreMemoryAccess(&Inst))
635 LoadsAndStores.push_back(&Inst);
636 else if (isa<MemIntrinsic>(Inst))
637 MemIntrinCalls.push_back(&Inst);
638 else if (isa<GetElementPtrInst>(Inst))
639 GetElementPtrs.push_back(&Inst);
640 else if (CallInst *CI = dyn_cast<CallInst>(&Inst))
641 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
642 }
643 }
644
645 if (ClInstrumentLoadsAndStores) {
646 for (auto Inst : LoadsAndStores) {
647 Res |= instrumentLoadOrStore(Inst, DL);
648 }
649 }
650
651 if (ClInstrumentMemIntrinsics) {
652 for (auto Inst : MemIntrinCalls) {
653 Res |= instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
654 }
655 }
656
657 if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) {
658 for (auto Inst : GetElementPtrs) {
659 Res |= instrumentGetElementPtr(Inst, M);
660 }
661 }
662
663 return Res;
664}
665
666bool EfficiencySanitizer::instrumentLoadOrStore(Instruction *I,
667 const DataLayout &DL) {
668 IRBuilder<> IRB(I);
669 bool IsStore;
670 Value *Addr;
671 unsigned Alignment;
672 if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
673 IsStore = false;
674 Alignment = Load->getAlignment();
675 Addr = Load->getPointerOperand();
676 } else if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
677 IsStore = true;
678 Alignment = Store->getAlignment();
679 Addr = Store->getPointerOperand();
680 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
681 IsStore = true;
682 Alignment = 0;
683 Addr = RMW->getPointerOperand();
684 } else if (AtomicCmpXchgInst *Xchg = dyn_cast<AtomicCmpXchgInst>(I)) {
685 IsStore = true;
686 Alignment = 0;
687 Addr = Xchg->getPointerOperand();
688 } else
689 llvm_unreachable("Unsupported mem access type")::llvm::llvm_unreachable_internal("Unsupported mem access type"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 689)
;
690
691 Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
692 const uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8;
693 Value *OnAccessFunc = nullptr;
694
695 // Convert 0 to the default alignment.
696 if (Alignment == 0)
697 Alignment = DL.getPrefTypeAlignment(OrigTy);
698
699 if (IsStore)
700 NumInstrumentedStores++;
701 else
702 NumInstrumentedLoads++;
703 int Idx = getMemoryAccessFuncIndex(Addr, DL);
704 if (Idx < 0) {
705 OnAccessFunc = IsStore ? EsanUnalignedStoreN : EsanUnalignedLoadN;
706 IRB.CreateCall(OnAccessFunc,
707 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
708 ConstantInt::get(IntptrTy, TypeSizeBytes)});
709 } else {
710 if (ClInstrumentFastpath &&
711 instrumentFastpath(I, DL, IsStore, Addr, Alignment)) {
712 NumFastpaths++;
713 return true;
714 }
715 if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
716 OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
717 else
718 OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
719 IRB.CreateCall(OnAccessFunc,
720 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
721 }
722 return true;
723}
724
725// It's simplest to replace the memset/memmove/memcpy intrinsics with
726// calls that the runtime library intercepts.
727// Our pass is late enough that calls should not turn back into intrinsics.
728bool EfficiencySanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
729 IRBuilder<> IRB(MI);
730 bool Res = false;
731 if (isa<MemSetInst>(MI)) {
732 IRB.CreateCall(
733 MemsetFn,
734 {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()),
735 IRB.CreateIntCast(MI->getArgOperand(1), IRB.getInt32Ty(), false),
736 IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)});
737 MI->eraseFromParent();
738 Res = true;
739 } else if (isa<MemTransferInst>(MI)) {
740 IRB.CreateCall(
741 isa<MemCpyInst>(MI) ? MemcpyFn : MemmoveFn,
742 {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()),
743 IRB.CreatePointerCast(MI->getArgOperand(1), IRB.getInt8PtrTy()),
744 IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)});
745 MI->eraseFromParent();
746 Res = true;
747 } else
748 llvm_unreachable("Unsupported mem intrinsic type")::llvm::llvm_unreachable_internal("Unsupported mem intrinsic type"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 748)
;
749 return Res;
750}
751
752bool EfficiencySanitizer::instrumentGetElementPtr(Instruction *I, Module &M) {
753 GetElementPtrInst *GepInst = dyn_cast<GetElementPtrInst>(I);
754 bool Res = false;
755 if (GepInst == nullptr || GepInst->getNumIndices() == 1) {
1
Assuming the condition is false
2
Assuming the condition is false
3
Taking false branch
756 ++NumIgnoredGEPs;
757 return false;
758 }
759 Type *SourceTy = GepInst->getSourceElementType();
760 StructType *StructTy = nullptr;
4
'StructTy' initialized to a null pointer value
761 ConstantInt *Idx;
762 // Check if GEP calculates address from a struct array.
763 if (isa<StructType>(SourceTy)) {
5
Taking false branch
764 StructTy = cast<StructType>(SourceTy);
765 Idx = dyn_cast<ConstantInt>(GepInst->getOperand(1));
766 if ((Idx == nullptr || Idx->getSExtValue() != 0) &&
767 !shouldIgnoreStructType(StructTy) && StructTyMap.count(StructTy) != 0)
768 Res |= insertCounterUpdate(I, StructTy, getArrayCounterIdx(StructTy));
769 }
770 // Iterate all (except the first and the last) idx within each GEP instruction
771 // for possible nested struct field address calculation.
772 for (unsigned i = 1; i < GepInst->getNumIndices(); ++i) {
6
Assuming the condition is true
7
Loop condition is true. Entering loop body
773 SmallVector<Value *, 8> IdxVec(GepInst->idx_begin(),
774 GepInst->idx_begin() + i);
775 Type *Ty = GetElementPtrInst::getIndexedType(SourceTy, IdxVec);
776 unsigned CounterIdx = 0;
777 if (isa<ArrayType>(Ty)) {
8
Taking false branch
778 ArrayType *ArrayTy = cast<ArrayType>(Ty);
779 StructTy = dyn_cast<StructType>(ArrayTy->getElementType());
780 if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0)
781 continue;
782 // The last counter for struct array access.
783 CounterIdx = getArrayCounterIdx(StructTy);
784 } else if (isa<StructType>(Ty)) {
9
Taking false branch
785 StructTy = cast<StructType>(Ty);
786 if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0)
787 continue;
788 // Get the StructTy's subfield index.
789 Idx = cast<ConstantInt>(GepInst->getOperand(i+1));
790 assert(Idx->getSExtValue() >= 0 &&((Idx->getSExtValue() >= 0 && Idx->getSExtValue
() < StructTy->getNumElements()) ? static_cast<void>
(0) : __assert_fail ("Idx->getSExtValue() >= 0 && Idx->getSExtValue() < StructTy->getNumElements()"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 791, __PRETTY_FUNCTION__))
791 Idx->getSExtValue() < StructTy->getNumElements())((Idx->getSExtValue() >= 0 && Idx->getSExtValue
() < StructTy->getNumElements()) ? static_cast<void>
(0) : __assert_fail ("Idx->getSExtValue() >= 0 && Idx->getSExtValue() < StructTy->getNumElements()"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 791, __PRETTY_FUNCTION__))
;
792 CounterIdx = getFieldCounterIdx(StructTy) + Idx->getSExtValue();
793 }
794 Res |= insertCounterUpdate(I, StructTy, CounterIdx);
10
Passing null pointer value via 2nd parameter 'StructTy'
11
Calling 'EfficiencySanitizer::insertCounterUpdate'
795 }
796 if (Res)
797 ++NumInstrumentedGEPs;
798 else
799 ++NumIgnoredGEPs;
800 return Res;
801}
802
803bool EfficiencySanitizer::insertCounterUpdate(Instruction *I,
804 StructType *StructTy,
805 unsigned CounterIdx) {
806 GlobalVariable *CounterArray = StructTyMap[StructTy];
807 if (CounterArray == nullptr)
12
Assuming the condition is false
13
Taking false branch
808 return false;
809 IRBuilder<> IRB(I);
810 Constant *Indices[2];
811 // Xref http://llvm.org/docs/LangRef.html#i-getelementptr and
812 // http://llvm.org/docs/GetElementPtr.html.
813 // The first index of the GEP instruction steps through the first operand,
814 // i.e., the array itself.
815 Indices[0] = ConstantInt::get(IRB.getInt32Ty(), 0);
816 // The second index is the index within the array.
817 Indices[1] = ConstantInt::get(IRB.getInt32Ty(), CounterIdx);
818 Constant *Counter =
819 ConstantExpr::getGetElementPtr(
820 ArrayType::get(IRB.getInt64Ty(), getStructCounterSize(StructTy)),
14
Passing null pointer value via 1st parameter 'StructTy'
15
Calling 'EfficiencySanitizer::getStructCounterSize'
821 CounterArray, Indices);
822 Value *Load = IRB.CreateLoad(Counter);
823 IRB.CreateStore(IRB.CreateAdd(Load, ConstantInt::get(IRB.getInt64Ty(), 1)),
824 Counter);
825 return true;
826}
827
828int EfficiencySanitizer::getMemoryAccessFuncIndex(Value *Addr,
829 const DataLayout &DL) {
830 Type *OrigPtrTy = Addr->getType();
831 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
832 assert(OrigTy->isSized())((OrigTy->isSized()) ? static_cast<void> (0) : __assert_fail
("OrigTy->isSized()", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 832, __PRETTY_FUNCTION__))
;
833 // The size is always a multiple of 8.
834 uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8;
835 if (TypeSizeBytes != 1 && TypeSizeBytes != 2 && TypeSizeBytes != 4 &&
836 TypeSizeBytes != 8 && TypeSizeBytes != 16) {
837 // Irregular sizes do not have per-size call targets.
838 NumAccessesWithIrregularSize++;
839 return -1;
840 }
841 size_t Idx = countTrailingZeros(TypeSizeBytes);
842 assert(Idx < NumberOfAccessSizes)((Idx < NumberOfAccessSizes) ? static_cast<void> (0)
: __assert_fail ("Idx < NumberOfAccessSizes", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn298304/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp"
, 842, __PRETTY_FUNCTION__))
;
843 return Idx;
844}
845
846bool EfficiencySanitizer::instrumentFastpath(Instruction *I,
847 const DataLayout &DL, bool IsStore,
848 Value *Addr, unsigned Alignment) {
849 if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) {
850 return instrumentFastpathCacheFrag(I, DL, Addr, Alignment);
851 } else if (Options.ToolType == EfficiencySanitizerOptions::ESAN_WorkingSet) {
852 return instrumentFastpathWorkingSet(I, DL, Addr, Alignment);
853 }
854 return false;
855}
856
857bool EfficiencySanitizer::instrumentFastpathCacheFrag(Instruction *I,
858 const DataLayout &DL,
859 Value *Addr,
860 unsigned Alignment) {
861 // Do nothing.
862 return true; // Return true to avoid slowpath instrumentation.
863}
864
// Working-set tool inline fastpath: mark the cache line touched by this
// access in shadow memory, without calling into the runtime.
// Returns true if the access was handled inline; false means the caller
// must fall back to the slowpath library call.
bool EfficiencySanitizer::instrumentFastpathWorkingSet(
    Instruction *I, const DataLayout &DL, Value *Addr, unsigned Alignment) {
  assert(ShadowScale[Options.ToolType] == 6); // The code below assumes this
  IRBuilder<> IRB(I);
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  // Bail to the slowpath if the access might touch multiple cache lines.
  // An access aligned to its size is guaranteed to be intra-cache-line.
  // getMemoryAccessFuncIndex has already ruled out a size larger than 16
  // and thus larger than a cache line for platforms this tool targets
  // (and our shadow memory setup assumes 64-byte cache lines).
  assert(TypeSize <= 128);
  if (!(TypeSize == 8 ||
        (Alignment % (TypeSize / 8)) == 0)) {
    // Possibly line-straddling access: either optimistically assume it is
    // intra-line (under the flag) or punt to the slowpath.
    if (ClAssumeIntraCacheLine)
      ++NumAssumedIntraCacheLine;
    else
      return false;
  }

  // We inline instrumentation to set the corresponding shadow bits for
  // each cache line touched by the application. Here we handle a single
  // load or store where we've already ruled out the possibility that it
  // might touch more than one cache line and thus we simply update the
  // shadow memory for a single cache line.
  // Our shadow memory model is fine with races when manipulating shadow values.
  // We generate the following code:
  //
  //   const char BitMask = 0x81;
  //   char *ShadowAddr = appToShadow(AppAddr);
  //   if ((*ShadowAddr & BitMask) != BitMask)
  //     *ShadowAddr |= Bitmask;
  //
  Value *AddrPtr = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *ShadowPtr = appToShadow(AddrPtr, IRB);
  Type *ShadowTy = IntegerType::get(*Ctx, 8U);
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  // The bottom bit is used for the current sampling period's working set.
  // The top bit is used for the total working set. We set both on each
  // memory access, if they are not already set.
  Value *ValueMask = ConstantInt::get(ShadowTy, 0x81); // 10000001B

  Value *OldValue = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
  // The AND and CMP will be turned into a TEST instruction by the compiler.
  Value *Cmp = IRB.CreateICmpNE(IRB.CreateAnd(OldValue, ValueMask), ValueMask);
  // Conditionally execute the store only when the bits are not already set,
  // to keep the common (already-marked) path a read-only test.
  TerminatorInst *CmpTerm = SplitBlockAndInsertIfThen(Cmp, I, false);
  // FIXME: do I need to call SetCurrentDebugLocation?
  IRB.SetInsertPoint(CmpTerm);
  // We use OR to set the shadow bits to avoid corrupting the middle 6 bits,
  // which are used by the runtime library.
  Value *NewVal = IRB.CreateOr(OldValue, ValueMask);
  IRB.CreateStore(NewVal, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
  // Restore the insert point for any instrumentation the caller adds next.
  IRB.SetInsertPoint(I);

  return true;
}