Bug Summary

File: lib/Transforms/Instrumentation/MemorySanitizer.cpp
Warning: line 1162, column 11
Forming reference to null pointer
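
The flagged statement is the DEBUG print in MemorySanitizerVisitor::getShadow (source line 1162). On the path the analyzer follows, a function argument's shadow slot in ShadowMap is filled with getCleanShadow(V), which returns nullptr when getShadowTy cannot produce a shadow type, and the subsequent **ShadowPtr then binds a reference through a null pointer. Below is a minimal, self-contained sketch of that pattern; the names are hypothetical stand-ins, not the pass's actual code, and the annotated listing that follows walks the same path through the real source.

#include <map>

struct Value {};

void use(Value &) {}  // binding a reference to *nullptr here is undefined behavior

// Hypothetical stand-in for getCleanShadow(), which may return null when no
// shadow type exists for the value (see getShadowTy/getCleanShadow in the listing).
Value *getCleanShadowOrNull(bool HasShadowTy) {
  return HasShadowTy ? new Value() : nullptr;
}

std::map<int, Value *> ShadowMap;

void demo(int Key, bool Overflow, bool HasShadowTy) {
  Value **ShadowPtr = &ShadowMap[Key];              // slot starts out null
  if (Overflow)
    *ShadowPtr = getCleanShadowOrNull(HasShadowTy); // may leave the slot null
  use(**ShadowPtr);  // if *ShadowPtr is null, this forms a reference to null
}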

Annotated Source Code

1//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10/// This file is a part of MemorySanitizer, a detector of uninitialized
11/// reads.
12///
13/// The algorithm of the tool is similar to Memcheck
14/// (http://goo.gl/QKbem). We associate a few shadow bits with every
15/// byte of the application memory, poison the shadow of the malloc-ed
16/// or alloca-ed memory, load the shadow bits on every memory read,
17/// propagate the shadow bits through some of the arithmetic
18/// instructions (including MOV), store the shadow bits on every memory
19/// write, report a bug on some other instructions (e.g. JMP) if the
20/// associated shadow is poisoned.
21///
22/// But there are differences too. The first and the major one:
23/// compiler instrumentation instead of binary instrumentation. This
24/// gives us much better register allocation, possible compiler
25/// optimizations and a fast start-up. But this brings the major issue
26/// as well: msan needs to see all program events, including system
27/// calls and reads/writes in system libraries, so we either need to
28/// compile *everything* with msan or use a binary translation
29/// component (e.g. DynamoRIO) to instrument pre-built libraries.
30/// Another difference from Memcheck is that we use 8 shadow bits per
31/// byte of application memory and use a direct shadow mapping. This
32/// greatly simplifies the instrumentation code and avoids races on
33/// shadow updates (Memcheck is single-threaded so races are not a
34/// concern there. Memcheck uses 2 shadow bits per byte with a slow
35/// path storage that uses 8 bits per byte).
36///
37/// The default value of shadow is 0, which means "clean" (not poisoned).
38///
39/// Every module initializer should call __msan_init to ensure that the
40/// shadow memory is ready. On error, __msan_warning is called. Since
41/// parameters and return values may be passed via registers, we have a
42/// specialized thread-local shadow for return values
43/// (__msan_retval_tls) and parameters (__msan_param_tls).
44///
45/// Origin tracking.
46///
47/// MemorySanitizer can track origins (allocation points) of all uninitialized
48/// values. This behavior is controlled with a flag (msan-track-origins) and is
49/// disabled by default.
50///
51/// Origins are 4-byte values created and interpreted by the runtime library.
52/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53/// of application memory. Propagation of origins is basically a bunch of
54/// "select" instructions that pick the origin of a dirty argument, if an
55/// instruction has one.
56///
57/// Every aligned group of 4 consecutive bytes of application memory has one
58/// origin value associated with it. If these bytes contain uninitialized data
59/// coming from 2 different allocations, the last store wins. Because of this,
60/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61/// practice.
62///
63/// Origins are meaningless for fully initialized values, so MemorySanitizer
64/// avoids storing origin to memory when a fully initialized value is stored.
65/// This way it avoids needlessly overwriting the origin of the 4-byte region
66/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
67///
68/// Atomic handling.
69///
70/// Ideally, every atomic store of application value should update the
71/// corresponding shadow location in an atomic way. Unfortunately, atomic store
72/// of two disjoint locations cannot be done without severe slowdown.
73///
74/// Therefore, we implement an approximation that may err on the safe side.
75/// In this implementation, every atomically accessed location in the program
76/// may only change from (partially) uninitialized to fully initialized, but
77/// not the other way around. We load the shadow _after_ the application load,
78/// and we store the shadow _before_ the app store. Also, we always store clean
79/// shadow (if the application store is atomic). This way, if the store-load
80/// pair constitutes a happens-before arc, shadow store and load are correctly
81/// ordered such that the load will get either the value that was stored, or
82/// some later value (which is always clean).
83///
84/// This does not work very well with Compare-And-Swap (CAS) and
85/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86/// must store the new shadow before the app operation, and load the shadow
87/// after the app operation. Computers don't work this way. Current
88/// implementation ignores the load aspect of CAS/RMW, always returning a clean
89/// value. It implements the store part as a simple atomic store by storing a
90/// clean shadow.
91
92//===----------------------------------------------------------------------===//
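
The propagation scheme sketched in the comment above can be summarized with a small model: every value carries a shadow word, arithmetic ORs the operand shadows together (an approximation; the visitors later in this file refine the rule per instruction), and a branch on a value with non-zero shadow is reported as a use of uninitialized memory. The snippet below is an illustrative sketch of that model, not code from this pass.

#include <cstdint>

// Conceptual model, assuming 8 shadow bits per application byte and
// OR-propagation for arithmetic.
struct Shadowed {
  uint32_t val;     // application value
  uint32_t shadow;  // 1-bits mark possibly-uninitialized bits; 0 means clean
};

Shadowed add(Shadowed a, Shadowed b) {
  // A result bit is suspect if either input carried poisoned bits.
  return {a.val + b.val, a.shadow | b.shadow};
}

bool mustWarnOnBranch(Shadowed cond) {
  // "report a bug on some other instructions (e.g. JMP) if the associated
  // shadow is poisoned": branching on a value with non-zero shadow is a UMR.
  return cond.shadow != 0;
}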
93
94#include "llvm/ADT/DepthFirstIterator.h"
95#include "llvm/ADT/SmallString.h"
96#include "llvm/ADT/SmallVector.h"
97#include "llvm/ADT/StringExtras.h"
98#include "llvm/ADT/Triple.h"
99#include "llvm/IR/DataLayout.h"
100#include "llvm/IR/Function.h"
101#include "llvm/IR/IRBuilder.h"
102#include "llvm/IR/InlineAsm.h"
103#include "llvm/IR/InstVisitor.h"
104#include "llvm/IR/IntrinsicInst.h"
105#include "llvm/IR/LLVMContext.h"
106#include "llvm/IR/MDBuilder.h"
107#include "llvm/IR/Module.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/ValueMap.h"
110#include "llvm/Support/CommandLine.h"
111#include "llvm/Support/Debug.h"
112#include "llvm/Support/raw_ostream.h"
113#include "llvm/Transforms/Instrumentation.h"
114#include "llvm/Transforms/Utils/BasicBlockUtils.h"
115#include "llvm/Transforms/Utils/Local.h"
116#include "llvm/Transforms/Utils/ModuleUtils.h"
117
118using namespace llvm;
119
120#define DEBUG_TYPE "msan"
121
122static const unsigned kOriginSize = 4;
123static const unsigned kMinOriginAlignment = 4;
124static const unsigned kShadowTLSAlignment = 8;
125
126// These constants must be kept in sync with the ones in msan.h.
127static const unsigned kParamTLSSize = 800;
128static const unsigned kRetvalTLSSize = 800;
129
130// Access sizes are powers of two: 1, 2, 4, 8.
131static const size_t kNumberOfAccessSizes = 4;
132
133/// \brief Track origins of uninitialized values.
134///
135/// Adds a section to the MemorySanitizer report that points to the allocation
136/// (stack or heap) the uninitialized bits came from originally.
137static cl::opt<int> ClTrackOrigins("msan-track-origins",
138 cl::desc("Track origins (allocation sites) of poisoned memory"),
139 cl::Hidden, cl::init(0));
140static cl::opt<bool> ClKeepGoing("msan-keep-going",
141 cl::desc("keep going after reporting a UMR"),
142 cl::Hidden, cl::init(false));
143static cl::opt<bool> ClPoisonStack("msan-poison-stack",
144 cl::desc("poison uninitialized stack variables"),
145 cl::Hidden, cl::init(true));
146static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
147 cl::desc("poison uninitialized stack variables with a call"),
148 cl::Hidden, cl::init(false));
149static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
150 cl::desc("poison uninitialized stack variables with the given pattern"),
151 cl::Hidden, cl::init(0xff));
152static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
153 cl::desc("poison undef temps"),
154 cl::Hidden, cl::init(true));
155
156static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
157 cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
158 cl::Hidden, cl::init(true));
159
160static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
161 cl::desc("exact handling of relational integer ICmp"),
162 cl::Hidden, cl::init(false));
163
164// This flag controls whether we check the shadow of the address
165// operand of a load or store. Such bugs are very rare, since a load from
166// a garbage address typically results in SEGV, but they still happen
167// (e.g. only the lower bits of the address are garbage, or the access happens
168// early at program startup when malloc-ed memory is more likely to
169// be zeroed). As of 2012-08-28 this flag adds a 20% slowdown.
170static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
171 cl::desc("report accesses through a pointer which has poisoned shadow"),
172 cl::Hidden, cl::init(true));
173
174static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
175 cl::desc("print out instructions with default strict semantics"),
176 cl::Hidden, cl::init(false));
177
178static cl::opt<int> ClInstrumentationWithCallThreshold(
179 "msan-instrumentation-with-call-threshold",
180 cl::desc(
181 "If the function being instrumented requires more than "
182 "this number of checks and origin stores, use callbacks instead of "
183 "inline checks (-1 means never use callbacks)."),
184 cl::Hidden, cl::init(3500));
185
186// This is an experiment to enable handling of cases where shadow is a non-zero
187// compile-time constant. For some unexplained reason such cases were silently
188// ignored in the instrumentation.
189static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
190 cl::desc("Insert checks for constant shadow values"),
191 cl::Hidden, cl::init(false));
192
193// This is off by default because of a bug in gold:
194// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
195static cl::opt<bool> ClWithComdat("msan-with-comdat",
196 cl::desc("Place MSan constructors in comdat sections"),
197 cl::Hidden, cl::init(false));
198
199static const char *const kMsanModuleCtorName = "msan.module_ctor";
200static const char *const kMsanInitName = "__msan_init";
201
202namespace {
203
204// Memory map parameters used in application-to-shadow address calculation.
205// Offset = (Addr & ~AndMask) ^ XorMask
206// Shadow = ShadowBase + Offset
207// Origin = OriginBase + Offset
208struct MemoryMapParams {
209 uint64_t AndMask;
210 uint64_t XorMask;
211 uint64_t ShadowBase;
212 uint64_t OriginBase;
213};
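
A worked example of this mapping, using the default Linux x86_64 parameters defined further down (AndMask and ShadowBase unused, XorMask 0x500000000000, OriginBase 0x100000000000) and a hypothetical application address:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t AndMask = 0, XorMask = 0x500000000000;
  const uint64_t ShadowBase = 0, OriginBase = 0x100000000000;

  uint64_t Addr = 0x700000001000;                 // hypothetical app address
  uint64_t Offset = (Addr & ~AndMask) ^ XorMask;  // 0x200000001000
  uint64_t Shadow = ShadowBase + Offset;          // 0x200000001000
  uint64_t Origin = OriginBase + Offset;          // 0x300000001000

  std::printf("shadow=%llx origin=%llx\n",
              (unsigned long long)Shadow, (unsigned long long)Origin);
}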
214
215struct PlatformMemoryMapParams {
216 const MemoryMapParams *bits32;
217 const MemoryMapParams *bits64;
218};
219
220// i386 Linux
221static const MemoryMapParams Linux_I386_MemoryMapParams = {
222 0x000080000000, // AndMask
223 0, // XorMask (not used)
224 0, // ShadowBase (not used)
225 0x000040000000, // OriginBase
226};
227
228// x86_64 Linux
229static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
230#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
231 0x400000000000, // AndMask
232 0, // XorMask (not used)
233 0, // ShadowBase (not used)
234 0x200000000000, // OriginBase
235#else
236 0, // AndMask (not used)
237 0x500000000000, // XorMask
238 0, // ShadowBase (not used)
239 0x100000000000, // OriginBase
240#endif
241};
242
243// mips64 Linux
244static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
245 0, // AndMask (not used)
246 0x008000000000, // XorMask
247 0, // ShadowBase (not used)
248 0x002000000000, // OriginBase
249};
250
251// ppc64 Linux
252static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
253 0x200000000000, // AndMask
254 0x100000000000, // XorMask
255 0x080000000000, // ShadowBase
256 0x1C0000000000, // OriginBase
257};
258
259// aarch64 Linux
260static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
261 0, // AndMask (not used)
262 0x06000000000, // XorMask
263 0, // ShadowBase (not used)
264 0x01000000000, // OriginBase
265};
266
267// i386 FreeBSD
268static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
269 0x000180000000, // AndMask
270 0x000040000000, // XorMask
271 0x000020000000, // ShadowBase
272 0x000700000000, // OriginBase
273};
274
275// x86_64 FreeBSD
276static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
277 0xc00000000000, // AndMask
278 0x200000000000, // XorMask
279 0x100000000000, // ShadowBase
280 0x380000000000, // OriginBase
281};
282
283static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
284 &Linux_I386_MemoryMapParams,
285 &Linux_X86_64_MemoryMapParams,
286};
287
288static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
289 nullptr,
290 &Linux_MIPS64_MemoryMapParams,
291};
292
293static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
294 nullptr,
295 &Linux_PowerPC64_MemoryMapParams,
296};
297
298static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
299 nullptr,
300 &Linux_AArch64_MemoryMapParams,
301};
302
303static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
304 &FreeBSD_I386_MemoryMapParams,
305 &FreeBSD_X86_64_MemoryMapParams,
306};
307
308/// \brief An instrumentation pass implementing detection of uninitialized
309/// reads.
310///
311/// MemorySanitizer: instrument the code in module to find
312/// uninitialized reads.
313class MemorySanitizer : public FunctionPass {
314 public:
315 MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
316 : FunctionPass(ID),
317 TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
318 Recover(Recover || ClKeepGoing),
319 WarningFn(nullptr) {}
320 StringRef getPassName() const override { return "MemorySanitizer"; }
321 void getAnalysisUsage(AnalysisUsage &AU) const override {
322 AU.addRequired<TargetLibraryInfoWrapperPass>();
323 }
324 bool runOnFunction(Function &F) override;
325 bool doInitialization(Module &M) override;
326 static char ID; // Pass identification, replacement for typeid.
327
328 private:
329 void initializeCallbacks(Module &M);
330
331 /// \brief Track origins (allocation points) of uninitialized values.
332 int TrackOrigins;
333 bool Recover;
334
335 LLVMContext *C;
336 Type *IntptrTy;
337 Type *OriginTy;
338 /// \brief Thread-local shadow storage for function parameters.
339 GlobalVariable *ParamTLS;
340 /// \brief Thread-local origin storage for function parameters.
341 GlobalVariable *ParamOriginTLS;
342 /// \brief Thread-local shadow storage for function return value.
343 GlobalVariable *RetvalTLS;
344 /// \brief Thread-local origin storage for function return value.
345 GlobalVariable *RetvalOriginTLS;
346 /// \brief Thread-local shadow storage for in-register va_arg function
347 /// parameters (x86_64-specific).
348 GlobalVariable *VAArgTLS;
349 /// \brief Thread-local shadow storage for va_arg overflow area
350 /// (x86_64-specific).
351 GlobalVariable *VAArgOverflowSizeTLS;
352 /// \brief Thread-local space used to pass origin value to the UMR reporting
353 /// function.
354 GlobalVariable *OriginTLS;
355
356 /// \brief The run-time callback to print a warning.
357 Value *WarningFn;
358 // These arrays are indexed by log2(AccessSize).
359 Value *MaybeWarningFn[kNumberOfAccessSizes];
360 Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
361
362 /// \brief Run-time helper that generates a new origin value for a stack
363 /// allocation.
364 Value *MsanSetAllocaOrigin4Fn;
365 /// \brief Run-time helper that poisons stack on function entry.
366 Value *MsanPoisonStackFn;
367 /// \brief Run-time helper that records a store (or any event) of an
368 /// uninitialized value and returns an updated origin id encoding this info.
369 Value *MsanChainOriginFn;
370 /// \brief MSan runtime replacements for memmove, memcpy and memset.
371 Value *MemmoveFn, *MemcpyFn, *MemsetFn;
372
373 /// \brief Memory map parameters used in application-to-shadow calculation.
374 const MemoryMapParams *MapParams;
375
376 MDNode *ColdCallWeights;
377 /// \brief Branch weights for origin store.
378 MDNode *OriginStoreWeights;
379 /// \brief An empty volatile inline asm that prevents callback merge.
380 InlineAsm *EmptyAsm;
381 Function *MsanCtorFunction;
382
383 friend struct MemorySanitizerVisitor;
384 friend struct VarArgAMD64Helper;
385 friend struct VarArgMIPS64Helper;
386 friend struct VarArgAArch64Helper;
387 friend struct VarArgPowerPC64Helper;
388};
389} // anonymous namespace
390
391char MemorySanitizer::ID = 0;
392INITIALIZE_PASS_BEGIN(
393 MemorySanitizer, "msan",
394 "MemorySanitizer: detects uninitialized reads.", false, false)
395INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
396INITIALIZE_PASS_END(
397 MemorySanitizer, "msan",
398 "MemorySanitizer: detects uninitialized reads.", false, false)
399
400FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) {
401 return new MemorySanitizer(TrackOrigins, Recover);
402}
403
404/// \brief Create a non-const global initialized with the given string.
405///
406/// Creates a writable global for Str so that we can pass it to the
407/// run-time lib. The runtime uses the first 4 bytes of the string to store the
408/// frame ID, so the string needs to be mutable.
409static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
410 StringRef Str) {
411 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
412 return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
413 GlobalValue::PrivateLinkage, StrConst, "");
414}
415
416/// \brief Insert extern declaration of runtime-provided functions and globals.
417void MemorySanitizer::initializeCallbacks(Module &M) {
418 // Only do this once.
419 if (WarningFn)
420 return;
421
422 IRBuilder<> IRB(*C);
423 // Create the callback.
424 // FIXME: this function should have "Cold" calling conv,
425 // which is not yet implemented.
426 StringRef WarningFnName = Recover ? "__msan_warning"
427 : "__msan_warning_noreturn";
428 WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
429
430 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
431 AccessSizeIndex++) {
432 unsigned AccessSize = 1 << AccessSizeIndex;
433 std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
434 MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
435 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
436 IRB.getInt32Ty());
437
438 FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
439 MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
440 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
441 IRB.getInt8PtrTy(), IRB.getInt32Ty());
442 }
443
444 MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
445 "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
446 IRB.getInt8PtrTy(), IntptrTy);
447 MsanPoisonStackFn =
448 M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
449 IRB.getInt8PtrTy(), IntptrTy);
450 MsanChainOriginFn = M.getOrInsertFunction(
451 "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
452 MemmoveFn = M.getOrInsertFunction(
453 "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
454 IRB.getInt8PtrTy(), IntptrTy);
455 MemcpyFn = M.getOrInsertFunction(
456 "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
457 IntptrTy);
458 MemsetFn = M.getOrInsertFunction(
459 "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
460 IntptrTy);
461
462 // Create globals.
463 RetvalTLS = new GlobalVariable(
464 M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
465 GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
466 GlobalVariable::InitialExecTLSModel);
467 RetvalOriginTLS = new GlobalVariable(
468 M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
469 "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
470
471 ParamTLS = new GlobalVariable(
472 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
473 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
474 GlobalVariable::InitialExecTLSModel);
475 ParamOriginTLS = new GlobalVariable(
476 M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
477 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
478 nullptr, GlobalVariable::InitialExecTLSModel);
479
480 VAArgTLS = new GlobalVariable(
481 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
482 GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
483 GlobalVariable::InitialExecTLSModel);
484 VAArgOverflowSizeTLS = new GlobalVariable(
485 M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
486 "__msan_va_arg_overflow_size_tls", nullptr,
487 GlobalVariable::InitialExecTLSModel);
488 OriginTLS = new GlobalVariable(
489 M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
490 "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
491
492 // We insert an empty inline asm after __msan_report* to avoid callback merge.
493 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
494 StringRef(""), StringRef(""),
495 /*hasSideEffects=*/true);
496}
497
498/// \brief Module-level initialization.
499///
500/// Inserts a call to __msan_init into the module's constructor list.
501bool MemorySanitizer::doInitialization(Module &M) {
502 auto &DL = M.getDataLayout();
503
504 Triple TargetTriple(M.getTargetTriple());
505 switch (TargetTriple.getOS()) {
506 case Triple::FreeBSD:
507 switch (TargetTriple.getArch()) {
508 case Triple::x86_64:
509 MapParams = FreeBSD_X86_MemoryMapParams.bits64;
510 break;
511 case Triple::x86:
512 MapParams = FreeBSD_X86_MemoryMapParams.bits32;
513 break;
514 default:
515 report_fatal_error("unsupported architecture");
516 }
517 break;
518 case Triple::Linux:
519 switch (TargetTriple.getArch()) {
520 case Triple::x86_64:
521 MapParams = Linux_X86_MemoryMapParams.bits64;
522 break;
523 case Triple::x86:
524 MapParams = Linux_X86_MemoryMapParams.bits32;
525 break;
526 case Triple::mips64:
527 case Triple::mips64el:
528 MapParams = Linux_MIPS_MemoryMapParams.bits64;
529 break;
530 case Triple::ppc64:
531 case Triple::ppc64le:
532 MapParams = Linux_PowerPC_MemoryMapParams.bits64;
533 break;
534 case Triple::aarch64:
535 case Triple::aarch64_be:
536 MapParams = Linux_ARM_MemoryMapParams.bits64;
537 break;
538 default:
539 report_fatal_error("unsupported architecture");
540 }
541 break;
542 default:
543 report_fatal_error("unsupported operating system");
544 }
545
546 C = &(M.getContext());
547 IRBuilder<> IRB(*C);
548 IntptrTy = IRB.getIntPtrTy(DL);
549 OriginTy = IRB.getInt32Ty();
550
551 ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
552 OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
553
554 std::tie(MsanCtorFunction, std::ignore) =
555 createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
556 /*InitArgTypes=*/{},
557 /*InitArgs=*/{});
558 if (ClWithComdat) {
559 Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
560 MsanCtorFunction->setComdat(MsanCtorComdat);
561 appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
562 } else {
563 appendToGlobalCtors(M, MsanCtorFunction, 0);
564 }
565
566
567 if (TrackOrigins)
568 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
569 IRB.getInt32(TrackOrigins), "__msan_track_origins");
570
571 if (Recover)
572 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
573 IRB.getInt32(Recover), "__msan_keep_going");
574
575 return true;
576}
577
578namespace {
579
580/// \brief A helper class that handles instrumentation of VarArg
581/// functions on a particular platform.
582///
583/// Implementations are expected to insert the instrumentation
584/// necessary to propagate argument shadow through VarArg function
585/// calls. Visit* methods are called during an InstVisitor pass over
586/// the function, and should avoid creating new basic blocks. A new
587/// instance of this class is created for each instrumented function.
588struct VarArgHelper {
589 /// \brief Visit a CallSite.
590 virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
591
592 /// \brief Visit a va_start call.
593 virtual void visitVAStartInst(VAStartInst &I) = 0;
594
595 /// \brief Visit a va_copy call.
596 virtual void visitVACopyInst(VACopyInst &I) = 0;
597
598 /// \brief Finalize function instrumentation.
599 ///
600 /// This method is called after visiting all interesting (see above)
601 /// instructions in a function.
602 virtual void finalizeInstrumentation() = 0;
603
604 virtual ~VarArgHelper() {}
605};
606
607struct MemorySanitizerVisitor;
608
609VarArgHelper*
610CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
611 MemorySanitizerVisitor &Visitor);
612
613unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
614 if (TypeSize <= 8) return 0;
615 return Log2_32_Ceil((TypeSize + 7) / 8);
616}
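
TypeSizeToSizeIndex converts a type size in bits to log2 of the access size in bytes, which is how the MaybeWarningFn and MaybeStoreOriginFn arrays (kNumberOfAccessSizes entries) are indexed. A standalone re-implementation of the same arithmetic, useful for checking the mapping, might look like this (Log2_32_Ceil replaced by an explicit loop):

#include <cassert>

static unsigned typeSizeToSizeIndex(unsigned TypeSizeInBits) {
  if (TypeSizeInBits <= 8) return 0;
  unsigned Bytes = (TypeSizeInBits + 7) / 8, Idx = 0;
  while ((1u << Idx) < Bytes) ++Idx;   // ceil(log2(Bytes))
  return Idx;
}

int main() {
  assert(typeSizeToSizeIndex(1)  == 0);  // i1  -> 1-byte access helper
  assert(typeSizeToSizeIndex(8)  == 0);  // i8
  assert(typeSizeToSizeIndex(16) == 1);  // i16 -> 2-byte helper
  assert(typeSizeToSizeIndex(32) == 2);  // i32 -> 4-byte helper
  assert(typeSizeToSizeIndex(64) == 3);  // i64 -> 8-byte helper
}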
617
618/// This class does all the work for a given function. Store and Load
619/// instructions store and load corresponding shadow and origin
620/// values. Most instructions propagate shadow from arguments to their
621/// return values. Certain instructions (most importantly, BranchInst)
622/// test their argument shadow and print reports (with a runtime call) if it's
623/// non-zero.
624struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
625 Function &F;
626 MemorySanitizer &MS;
627 SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
628 ValueMap<Value*, Value*> ShadowMap, OriginMap;
629 std::unique_ptr<VarArgHelper> VAHelper;
630 const TargetLibraryInfo *TLI;
631
632 // The following flags disable parts of MSan instrumentation based on
633 // blacklist contents and command-line options.
634 bool InsertChecks;
635 bool PropagateShadow;
636 bool PoisonStack;
637 bool PoisonUndef;
638 bool CheckReturnValue;
639
640 struct ShadowOriginAndInsertPoint {
641 Value *Shadow;
642 Value *Origin;
643 Instruction *OrigIns;
644 ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
645 : Shadow(S), Origin(O), OrigIns(I) { }
646 };
647 SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
648 SmallVector<StoreInst *, 16> StoreList;
649
650 MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
651 : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
652 bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
653 InsertChecks = SanitizeFunction;
654 PropagateShadow = SanitizeFunction;
655 PoisonStack = SanitizeFunction && ClPoisonStack;
656 PoisonUndef = SanitizeFunction && ClPoisonUndef;
657 // FIXME: Consider using SpecialCaseList to specify a list of functions that
658 // must always return fully initialized values. For now, we hardcode "main".
659 CheckReturnValue = SanitizeFunction && (F.getName() == "main");
660 TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
661
662 DEBUG(if (!InsertChecks)
663 dbgs() << "MemorySanitizer is not inserting checks into '"
664 << F.getName() << "'\n");
665 }
666
667 Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
668 if (MS.TrackOrigins <= 1) return V;
669 return IRB.CreateCall(MS.MsanChainOriginFn, V);
670 }
671
672 Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
673 const DataLayout &DL = F.getParent()->getDataLayout();
674 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
675 if (IntptrSize == kOriginSize) return Origin;
676 assert(IntptrSize == kOriginSize * 2);
677 Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
678 return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
679 }
680
681 /// \brief Fill memory range with the given origin value.
682 void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
683 unsigned Size, unsigned Alignment) {
684 const DataLayout &DL = F.getParent()->getDataLayout();
685 unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
686 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
687 assert(IntptrAlignment >= kMinOriginAlignment);
688 assert(IntptrSize >= kOriginSize);
689
690 unsigned Ofs = 0;
691 unsigned CurrentAlignment = Alignment;
692 if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
693 Value *IntptrOrigin = originToIntptr(IRB, Origin);
694 Value *IntptrOriginPtr =
695 IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
696 for (unsigned i = 0; i < Size / IntptrSize; ++i) {
697 Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
698 : IntptrOriginPtr;
699 IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
700 Ofs += IntptrSize / kOriginSize;
701 CurrentAlignment = IntptrAlignment;
702 }
703 }
704
705 for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
706 Value *GEP =
707 i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
708 IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
709 CurrentAlignment = kMinOriginAlignment;
710 }
711 }
712
713 void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
714 unsigned Alignment, bool AsCall) {
715 const DataLayout &DL = F.getParent()->getDataLayout();
716 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
717 unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
718 if (Shadow->getType()->isAggregateType()) {
719 paintOrigin(IRB, updateOrigin(Origin, IRB),
720 getOriginPtr(Addr, IRB, Alignment), StoreSize,
721 OriginAlignment);
722 } else {
723 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
724 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
725 if (ConstantShadow) {
726 if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
727 paintOrigin(IRB, updateOrigin(Origin, IRB),
728 getOriginPtr(Addr, IRB, Alignment), StoreSize,
729 OriginAlignment);
730 return;
731 }
732
733 unsigned TypeSizeInBits =
734 DL.getTypeSizeInBits(ConvertedShadow->getType());
735 unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
736 if (AsCall && SizeIndex < kNumberOfAccessSizes) {
737 Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
738 Value *ConvertedShadow2 = IRB.CreateZExt(
739 ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
740 IRB.CreateCall(Fn, {ConvertedShadow2,
741 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
742 Origin});
743 } else {
744 Value *Cmp = IRB.CreateICmpNE(
745 ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
746 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
747 Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
748 IRBuilder<> IRBNew(CheckTerm);
749 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
750 getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
751 OriginAlignment);
752 }
753 }
754 }
755
756 void materializeStores(bool InstrumentWithCalls) {
757 for (StoreInst *SI : StoreList) {
758 IRBuilder<> IRB(SI);
759 Value *Val = SI->getValueOperand();
760 Value *Addr = SI->getPointerOperand();
761 Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
762 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
763
764 StoreInst *NewSI =
765 IRB.CreateAlignedStore(Shadow, ShadowPtr, SI->getAlignment());
766 DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
767 (void)NewSI;
768
769 if (ClCheckAccessAddress)
770 insertShadowCheck(Addr, SI);
771
772 if (SI->isAtomic())
773 SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
774
775 if (MS.TrackOrigins && !SI->isAtomic())
776 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI->getAlignment(),
777 InstrumentWithCalls);
778 }
779 }
780
781 void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
782 bool AsCall) {
783 IRBuilder<> IRB(OrigIns);
784 DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
785 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
786 DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
787
788 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
789 if (ConstantShadow) {
790 if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
791 if (MS.TrackOrigins) {
792 IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
793 MS.OriginTLS);
794 }
795 IRB.CreateCall(MS.WarningFn, {});
796 IRB.CreateCall(MS.EmptyAsm, {});
797 // FIXME: Insert UnreachableInst if !MS.Recover?
798 // This may invalidate some of the following checks and needs to be done
799 // at the very end.
800 }
801 return;
802 }
803
804 const DataLayout &DL = OrigIns->getModule()->getDataLayout();
805
806 unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
807 unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
808 if (AsCall && SizeIndex < kNumberOfAccessSizes) {
809 Value *Fn = MS.MaybeWarningFn[SizeIndex];
810 Value *ConvertedShadow2 =
811 IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
812 IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
813 ? Origin
814 : (Value *)IRB.getInt32(0)});
815 } else {
816 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
817 getCleanShadow(ConvertedShadow), "_mscmp");
818 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
819 Cmp, OrigIns,
820 /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
821
822 IRB.SetInsertPoint(CheckTerm);
823 if (MS.TrackOrigins) {
824 IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
825 MS.OriginTLS);
826 }
827 IRB.CreateCall(MS.WarningFn, {});
828 IRB.CreateCall(MS.EmptyAsm, {});
829 DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
830 }
831 }
832
833 void materializeChecks(bool InstrumentWithCalls) {
834 for (const auto &ShadowData : InstrumentationList) {
835 Instruction *OrigIns = ShadowData.OrigIns;
836 Value *Shadow = ShadowData.Shadow;
837 Value *Origin = ShadowData.Origin;
838 materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
839 }
840 DEBUG(dbgs() << "DONE:\n" << F);
841 }
842
843 /// \brief Add MemorySanitizer instrumentation to a function.
844 bool runOnFunction() {
845 MS.initializeCallbacks(*F.getParent());
846
847 // In the presence of unreachable blocks, we may see Phi nodes with
848 // incoming nodes from such blocks. Since InstVisitor skips unreachable
849 // blocks, such nodes will not have any shadow value associated with them.
850 // It's easier to remove unreachable blocks than deal with missing shadow.
851 removeUnreachableBlocks(F);
852
853 // Iterate all BBs in depth-first order and create shadow instructions
854 // for all instructions (where applicable).
855 // For PHI nodes we create dummy shadow PHIs which will be finalized later.
856 for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
857 visit(*BB);
858
859
860 // Finalize PHI nodes.
861 for (PHINode *PN : ShadowPHINodes) {
862 PHINode *PNS = cast<PHINode>(getShadow(PN));
863 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
864 size_t NumValues = PN->getNumIncomingValues();
865 for (size_t v = 0; v < NumValues; v++) {
866 PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
867 if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
868 }
869 }
870
871 VAHelper->finalizeInstrumentation();
872
873 bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
874 InstrumentationList.size() + StoreList.size() >
875 (unsigned)ClInstrumentationWithCallThreshold;
876
877 // Delayed instrumentation of StoreInst.
878 // This may add new checks to be inserted later.
879 materializeStores(InstrumentWithCalls);
880
881 // Insert shadow value checks.
882 materializeChecks(InstrumentWithCalls);
883
884 return true;
885 }
886
887 /// \brief Compute the shadow type that corresponds to a given Value.
888 Type *getShadowTy(Value *V) {
889 return getShadowTy(V->getType());
890 }
891
892 /// \brief Compute the shadow type that corresponds to a given Type.
893 Type *getShadowTy(Type *OrigTy) {
894 if (!OrigTy->isSized()) {
895 return nullptr;
896 }
897 // For integer type, shadow is the same as the original type.
898 // This may return weird-sized types like i1.
899 if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
900 return IT;
901 const DataLayout &DL = F.getParent()->getDataLayout();
902 if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
903 uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
904 return VectorType::get(IntegerType::get(*MS.C, EltSize),
905 VT->getNumElements());
906 }
907 if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
908 return ArrayType::get(getShadowTy(AT->getElementType()),
909 AT->getNumElements());
910 }
911 if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
912 SmallVector<Type*, 4> Elements;
913 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
914 Elements.push_back(getShadowTy(ST->getElementType(i)));
915 StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
916 DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
917 return Res;
918 }
919 uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
920 return IntegerType::get(*MS.C, TypeSize);
921 }
922
923 /// \brief Flatten a vector type.
924 Type *getShadowTyNoVec(Type *ty) {
925 if (VectorType *vt = dyn_cast<VectorType>(ty))
926 return IntegerType::get(*MS.C, vt->getBitWidth());
927 return ty;
928 }
929
930 /// \brief Convert a shadow value to its flattened variant.
931 Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
932 Type *Ty = V->getType();
933 Type *NoVecTy = getShadowTyNoVec(Ty);
934 if (Ty == NoVecTy) return V;
935 return IRB.CreateBitCast(V, NoVecTy);
936 }
937
938 /// \brief Compute the integer shadow offset that corresponds to a given
939 /// application address.
940 ///
941 /// Offset = (Addr & ~AndMask) ^ XorMask
942 Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
943 Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
944
945 uint64_t AndMask = MS.MapParams->AndMask;
946 if (AndMask)
947 OffsetLong =
948 IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
949
950 uint64_t XorMask = MS.MapParams->XorMask;
951 if (XorMask)
952 OffsetLong =
953 IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
954 return OffsetLong;
955 }
956
957 /// \brief Compute the shadow address that corresponds to a given application
958 /// address.
959 ///
960 /// Shadow = ShadowBase + Offset
961 Value *getShadowPtr(Value *Addr, Type *ShadowTy,
962 IRBuilder<> &IRB) {
963 Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
964 uint64_t ShadowBase = MS.MapParams->ShadowBase;
965 if (ShadowBase != 0)
966 ShadowLong =
967 IRB.CreateAdd(ShadowLong,
968 ConstantInt::get(MS.IntptrTy, ShadowBase));
969 return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
970 }
971
972 /// \brief Compute the origin address that corresponds to a given application
973 /// address.
974 ///
975 /// OriginAddr = (OriginBase + Offset) & ~3ULL
976 Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
977 Value *OriginLong = getShadowPtrOffset(Addr, IRB);
978 uint64_t OriginBase = MS.MapParams->OriginBase;
979 if (OriginBase != 0)
980 OriginLong =
981 IRB.CreateAdd(OriginLong,
982 ConstantInt::get(MS.IntptrTy, OriginBase));
983 if (Alignment < kMinOriginAlignment) {
984 uint64_t Mask = kMinOriginAlignment - 1;
985 OriginLong = IRB.CreateAnd(OriginLong,
986 ConstantInt::get(MS.IntptrTy, ~Mask));
987 }
988 return IRB.CreateIntToPtr(OriginLong,
989 PointerType::get(IRB.getInt32Ty(), 0));
990 }
991
992 /// \brief Compute the shadow address for a given function argument.
993 ///
994 /// Shadow = ParamTLS+ArgOffset.
995 Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
996 int ArgOffset) {
997 Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
998 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
999 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1000 "_msarg");
1001 }
1002
1003 /// \brief Compute the origin address for a given function argument.
1004 Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1005 int ArgOffset) {
1006 if (!MS.TrackOrigins) return nullptr;
1007 Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1008 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1009 return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1010 "_msarg_o");
1011 }
1012
1013 /// \brief Compute the shadow address for a retval.
1014 Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1015 Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
1016 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1017 "_msret");
1018 }
1019
1020 /// \brief Compute the origin address for a retval.
1021 Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1022 // We keep a single origin for the entire retval. Might be too optimistic.
1023 return MS.RetvalOriginTLS;
1024 }
1025
1026 /// \brief Set SV to be the shadow value for V.
1027 void setShadow(Value *V, Value *SV) {
1028 assert(!ShadowMap.count(V) && "Values may only have one shadow");
1029 ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1030 }
1031
1032 /// \brief Set Origin to be the origin value for V.
1033 void setOrigin(Value *V, Value *Origin) {
1034 if (!MS.TrackOrigins) return;
1035 assert(!OriginMap.count(V) && "Values may only have one origin");
1036 DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
1037 OriginMap[V] = Origin;
1038 }
1039
1040 Constant *getCleanShadow(Type *OrigTy) {
1041 Type *ShadowTy = getShadowTy(OrigTy);
1042 if (!ShadowTy)
1043 return nullptr;
1044 return Constant::getNullValue(ShadowTy);
1045 }
1046
1047 /// \brief Create a clean shadow value for a given value.
1048 ///
1049 /// Clean shadow (all zeroes) means all bits of the value are defined
1050 /// (initialized).
1051 Constant *getCleanShadow(Value *V) {
1052 return getCleanShadow(V->getType());
1053 }
1054
1055 /// \brief Create a dirty shadow of a given shadow type.
1056 Constant *getPoisonedShadow(Type *ShadowTy) {
1057 assert(ShadowTy);
1058 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1059 return Constant::getAllOnesValue(ShadowTy);
1060 if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1061 SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1062 getPoisonedShadow(AT->getElementType()));
1063 return ConstantArray::get(AT, Vals);
1064 }
1065 if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1066 SmallVector<Constant *, 4> Vals;
1067 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1068 Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1069 return ConstantStruct::get(ST, Vals);
1070 }
1071 llvm_unreachable("Unexpected shadow type");
1072 }
1073
1074 /// \brief Create a dirty shadow for a given value.
1075 Constant *getPoisonedShadow(Value *V) {
1076 Type *ShadowTy = getShadowTy(V);
1077 if (!ShadowTy)
1078 return nullptr;
1079 return getPoisonedShadow(ShadowTy);
1080 }
1081
1082 /// \brief Create a clean (zero) origin.
1083 Value *getCleanOrigin() {
1084 return Constant::getNullValue(MS.OriginTy);
1085 }
1086
1087 /// \brief Get the shadow value for a given Value.
1088 ///
1089 /// This function either returns the value set earlier with setShadow,
1090 /// or extracts it from ParamTLS (for function arguments).
1091 Value *getShadow(Value *V) {
1092 if (!PropagateShadow) return getCleanShadow(V);
1
Assuming the condition is false
2
Taking false branch
1093 if (Instruction *I = dyn_cast<Instruction>(V)) {
3
Taking false branch
1094 // For instructions the shadow is already stored in the map.
1095 Value *Shadow = ShadowMap[V];
1096 if (!Shadow) {
1097 DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1098 (void)I;
1099 assert(Shadow && "No shadow for a value");
1100 }
1101 return Shadow;
1102 }
1103 if (UndefValue *U = dyn_cast<UndefValue>(V)) {
4
Taking false branch
1104 Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1105 DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1106 (void)U;
1107 return AllOnes;
1108 }
1109 if (Argument *A = dyn_cast<Argument>(V)) {
5
Assuming 'A' is non-null
6
Taking true branch
1110 // For arguments we compute the shadow on demand and store it in the map.
1111 Value **ShadowPtr = &ShadowMap[V];
1112 if (*ShadowPtr)
7
Assuming the condition is false
8
Taking false branch
1113 return *ShadowPtr;
1114 Function *F = A->getParent();
1115 IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
1116 unsigned ArgOffset = 0;
1117 const DataLayout &DL = F->getParent()->getDataLayout();
1118 for (auto &FArg : F->args()) {
9
Assuming '__begin' is not equal to '__end'
1119 if (!FArg.getType()->isSized()) {
10
Assuming the condition is false
11
Taking false branch
16
Assuming the condition is false
17
Taking false branch
21
Assuming the condition is false
22
Taking false branch
26
Assuming the condition is false
27
Taking false branch
1120 DEBUG(dbgs() << "Arg is not sized\n");
1121 continue;
1122 }
1123 unsigned Size =
1124 FArg.hasByValAttr()
12
Assuming the condition is false
13
'?' condition is false
18
Assuming the condition is false
19
'?' condition is false
23
Assuming the condition is false
24
'?' condition is false
28
Assuming the condition is false
29
'?' condition is false
1125 ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
1126 : DL.getTypeAllocSize(FArg.getType());
1127 if (A == &FArg) {
14
Assuming the condition is false
15
Taking false branch
20
Taking false branch
25
Taking false branch
30
Taking true branch
1128 bool Overflow = ArgOffset + Size > kParamTLSSize;
1129 Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1130 if (FArg.hasByValAttr()) {
31
Assuming the condition is false
32
Taking false branch
1131 // ByVal pointer itself has clean shadow. We copy the actual
1132 // argument shadow to the underlying memory.
1133 // Figure out maximal valid memcpy alignment.
1134 unsigned ArgAlign = FArg.getParamAlignment();
1135 if (ArgAlign == 0) {
1136 Type *EltType = A->getType()->getPointerElementType();
1137 ArgAlign = DL.getABITypeAlignment(EltType);
1138 }
1139 if (Overflow) {
1140 // ParamTLS overflow.
1141 EntryIRB.CreateMemSet(
1142 getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
1143 Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
1144 } else {
1145 unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1146 Value *Cpy = EntryIRB.CreateMemCpy(
1147 getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
1148 CopyAlign);
1149 DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
1150 (void)Cpy;
1151 }
1152 *ShadowPtr = getCleanShadow(V);
1153 } else {
1154 if (Overflow) {
33
Assuming 'Overflow' is not equal to 0
34
Taking true branch
1155 // ParamTLS overflow.
1156 *ShadowPtr = getCleanShadow(V);
1157 } else {
1158 *ShadowPtr =
1159 EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
1160 }
1161 }
1162 DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
35
Within the expansion of the macro 'DEBUG':
a
Assuming 'DebugFlag' is not equal to 0
b
Assuming the condition is true
c
Forming reference to null pointer
1163 **ShadowPtr << "\n");
1164 if (MS.TrackOrigins && !Overflow) {
1165 Value *OriginPtr =
1166 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1167 setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
1168 } else {
1169 setOrigin(A, getCleanOrigin());
1170 }
1171 }
1172 ArgOffset += alignTo(Size, kShadowTLSAlignment);
1173 }
1174 assert(*ShadowPtr && "Could not find shadow for an argument");
1175 return *ShadowPtr;
1176 }
1177 // For everything else the shadow is zero.
1178 return getCleanShadow(V);
1179 }
1180
1181 /// \brief Get the shadow for i-th argument of the instruction I.
1182 Value *getShadow(Instruction *I, int i) {
1183 return getShadow(I->getOperand(i));
1184 }
1185
1186 /// \brief Get the origin for a value.
1187 Value *getOrigin(Value *V) {
1188 if (!MS.TrackOrigins) return nullptr;
1189 if (!PropagateShadow) return getCleanOrigin();
1190 if (isa<Constant>(V)) return getCleanOrigin();
1191 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1192 "Unexpected value type in getOrigin()");
1193 Value *Origin = OriginMap[V];
1194 assert(Origin && "Missing origin");
1195 return Origin;
1196 }
1197
1198 /// \brief Get the origin for i-th argument of the instruction I.
1199 Value *getOrigin(Instruction *I, int i) {
1200 return getOrigin(I->getOperand(i));
1201 }
1202
1203 /// \brief Remember the place where a shadow check should be inserted.
1204 ///
1205 /// This location will be later instrumented with a check that will print a
1206 /// UMR warning at runtime if the shadow value is not 0.
1207 void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1208 assert(Shadow);
1209 if (!InsertChecks) return;
1210#ifndef NDEBUG
1211 Type *ShadowTy = Shadow->getType();
1212 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1213 "Can only insert checks for integer and vector shadow types");
1214#endif
1215 InstrumentationList.push_back(
1216 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1217 }
1218
1219 /// \brief Remember the place where a shadow check should be inserted.
1220 ///
1221 /// This location will be later instrumented with a check that will print a
1222 /// UMR warning in runtime if the value is not fully defined.
1223 void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1224 assert(Val);
1225 Value *Shadow, *Origin;
1226 if (ClCheckConstantShadow) {
1227 Shadow = getShadow(Val);
1228 if (!Shadow) return;
1229 Origin = getOrigin(Val);
1230 } else {
1231 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1232 if (!Shadow) return;
1233 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1234 }
1235 insertShadowCheck(Shadow, Origin, OrigIns);
1236 }
1237
1238 AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1239 switch (a) {
1240 case AtomicOrdering::NotAtomic:
1241 return AtomicOrdering::NotAtomic;
1242 case AtomicOrdering::Unordered:
1243 case AtomicOrdering::Monotonic:
1244 case AtomicOrdering::Release:
1245 return AtomicOrdering::Release;
1246 case AtomicOrdering::Acquire:
1247 case AtomicOrdering::AcquireRelease:
1248 return AtomicOrdering::AcquireRelease;
1249 case AtomicOrdering::SequentiallyConsistent:
1250 return AtomicOrdering::SequentiallyConsistent;
1251 }
1252 llvm_unreachable("Unknown ordering");
1253 }
1254
1255 AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1256 switch (a) {
1257 case AtomicOrdering::NotAtomic:
1258 return AtomicOrdering::NotAtomic;
1259 case AtomicOrdering::Unordered:
1260 case AtomicOrdering::Monotonic:
1261 case AtomicOrdering::Acquire:
1262 return AtomicOrdering::Acquire;
1263 case AtomicOrdering::Release:
1264 case AtomicOrdering::AcquireRelease:
1265 return AtomicOrdering::AcquireRelease;
1266 case AtomicOrdering::SequentiallyConsistent:
1267 return AtomicOrdering::SequentiallyConsistent;
1268 }
1269 llvm_unreachable("Unknown ordering");
1270 }
1271
1272 // ------------------- Visitors.
1273
1274 /// \brief Instrument LoadInst
1275 ///
1276 /// Loads the corresponding shadow and (optionally) origin.
1277 /// Optionally, checks that the load address is fully defined.
1278 void visitLoadInst(LoadInst &I) {
1279 assert(I.getType()->isSized() && "Load type must have size");
1280 IRBuilder<> IRB(I.getNextNode());
1281 Type *ShadowTy = getShadowTy(&I);
1282 Value *Addr = I.getPointerOperand();
1283 if (PropagateShadow && !I.getMetadata("nosanitize")) {
1284 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1285 setShadow(&I,
1286 IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
1287 } else {
1288 setShadow(&I, getCleanShadow(&I));
1289 }
1290
1291 if (ClCheckAccessAddress)
1292 insertShadowCheck(I.getPointerOperand(), &I);
1293
1294 if (I.isAtomic())
1295 I.setOrdering(addAcquireOrdering(I.getOrdering()));
1296
1297 if (MS.TrackOrigins) {
1298 if (PropagateShadow) {
1299 unsigned Alignment = I.getAlignment();
1300 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1301 setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
1302 OriginAlignment));
1303 } else {
1304 setOrigin(&I, getCleanOrigin());
1305 }
1306 }
1307 }
1308
1309 /// \brief Instrument StoreInst
1310 ///
1311 /// Stores the corresponding shadow and (optionally) origin.
1312 /// Optionally, checks that the store address is fully defined.
1313 void visitStoreInst(StoreInst &I) {
1314 StoreList.push_back(&I);
1315 }
1316
1317 void handleCASOrRMW(Instruction &I) {
1318 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1319
1320 IRBuilder<> IRB(&I);
1321 Value *Addr = I.getOperand(0);
1322 Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
1323
1324 if (ClCheckAccessAddress)
1325 insertShadowCheck(Addr, &I);
1326
1327 // Only test the conditional argument of cmpxchg instruction.
1328 // The other argument can potentially be uninitialized, but we can not
1329 // detect this situation reliably without possible false positives.
1330 if (isa<AtomicCmpXchgInst>(I))
1331 insertShadowCheck(I.getOperand(1), &I);
1332
1333 IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1334
1335 setShadow(&I, getCleanShadow(&I));
1336 setOrigin(&I, getCleanOrigin());
1337 }
1338
1339 void visitAtomicRMWInst(AtomicRMWInst &I) {
1340 handleCASOrRMW(I);
1341 I.setOrdering(addReleaseOrdering(I.getOrdering()));
1342 }
1343
1344 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1345 handleCASOrRMW(I);
1346 I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1347 }
1348
1349 // Vector manipulation.
1350 void visitExtractElementInst(ExtractElementInst &I) {
1351 insertShadowCheck(I.getOperand(1), &I);
1352 IRBuilder<> IRB(&I);
1353 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1354 "_msprop"));
1355 setOrigin(&I, getOrigin(&I, 0));
1356 }
1357
1358 void visitInsertElementInst(InsertElementInst &I) {
1359 insertShadowCheck(I.getOperand(2), &I);
1360 IRBuilder<> IRB(&I);
1361 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1362 I.getOperand(2), "_msprop"));
1363 setOriginForNaryOp(I);
1364 }
1365
1366 void visitShuffleVectorInst(ShuffleVectorInst &I) {
1367 insertShadowCheck(I.getOperand(2), &I);
1368 IRBuilder<> IRB(&I);
1369 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1370 I.getOperand(2), "_msprop"));
1371 setOriginForNaryOp(I);
1372 }
1373
1374 // Casts.
1375 void visitSExtInst(SExtInst &I) {
1376 IRBuilder<> IRB(&I);
1377 setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1378 setOrigin(&I, getOrigin(&I, 0));
1379 }
1380
1381 void visitZExtInst(ZExtInst &I) {
1382 IRBuilder<> IRB(&I);
1383 setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1384 setOrigin(&I, getOrigin(&I, 0));
1385 }
1386
1387 void visitTruncInst(TruncInst &I) {
1388 IRBuilder<> IRB(&I);
1389 setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1390 setOrigin(&I, getOrigin(&I, 0));
1391 }
1392
1393 void visitBitCastInst(BitCastInst &I) {
1394 // Special case: if this is the bitcast (there is exactly 1 allowed) between
1395 // a musttail call and a ret, don't instrument. New instructions are not
1396 // allowed after a musttail call.
1397 if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1398 if (CI->isMustTailCall())
1399 return;
1400 IRBuilder<> IRB(&I);
1401 setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1402 setOrigin(&I, getOrigin(&I, 0));
1403 }
1404
1405 void visitPtrToIntInst(PtrToIntInst &I) {
1406 IRBuilder<> IRB(&I);
1407 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1408 "_msprop_ptrtoint"));
1409 setOrigin(&I, getOrigin(&I, 0));
1410 }
1411
1412 void visitIntToPtrInst(IntToPtrInst &I) {
1413 IRBuilder<> IRB(&I);
1414 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1415 "_msprop_inttoptr"));
1416 setOrigin(&I, getOrigin(&I, 0));
1417 }
1418
1419 void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1420 void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1421 void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1422 void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1423 void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1424 void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1425
1426 /// \brief Propagate shadow for bitwise AND.
1427 ///
1428 /// This code is exact, i.e. if, for example, a bit in the left argument
1429 /// is defined and 0, then neither the value nor the definedness of the
1430 /// corresponding bit in the right argument affects the resulting shadow.
1431 void visitAnd(BinaryOperator &I) {
1432 IRBuilder<> IRB(&I);
1433 // "And" of 0 and a poisoned value results in unpoisoned value.
1434 // 1&1 => 1; 0&1 => 0; p&1 => p;
1435 // 1&0 => 0; 0&0 => 0; p&0 => 0;
1436 // 1&p => p; 0&p => 0; p&p => p;
1437 // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
1438 Value *S1 = getShadow(&I, 0);
1439 Value *S2 = getShadow(&I, 1);
1440 Value *V1 = I.getOperand(0);
1441 Value *V2 = I.getOperand(1);
1442 if (V1->getType() != S1->getType()) {
1443 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1444 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1445 }
1446 Value *S1S2 = IRB.CreateAnd(S1, S2);
1447 Value *V1S2 = IRB.CreateAnd(V1, S2);
1448 Value *S1V2 = IRB.CreateAnd(S1, V2);
1449 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1450 setOriginForNaryOp(I);
1451 }
1452
1453 void visitOr(BinaryOperator &I) {
1454 IRBuilder<> IRB(&I);
1455 // "Or" of 1 and a poisoned value results in unpoisoned value.
1456 // 1|1 => 1; 0|1 => 1; p|1 => 1;
1457 // 1|0 => 1; 0|0 => 0; p|0 => p;
1458 // 1|p => 1; 0|p => p; p|p => p;
1459 // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
1460 Value *S1 = getShadow(&I, 0);
1461 Value *S2 = getShadow(&I, 1);
1462 Value *V1 = IRB.CreateNot(I.getOperand(0));
1463 Value *V2 = IRB.CreateNot(I.getOperand(1));
1464 if (V1->getType() != S1->getType()) {
1465 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1466 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1467 }
1468 Value *S1S2 = IRB.CreateAnd(S1, S2);
1469 Value *V1S2 = IRB.CreateAnd(V1, S2);
1470 Value *S1V2 = IRB.CreateAnd(S1, V2);
1471 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1472 setOriginForNaryOp(I);
1473 }
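
As an illustration, here is a minimal standalone sketch in plain C++ of the bit rule the two visitors above implement; the names andShadow/orShadow are invented for the example and are not part of this pass. A set shadow bit means the corresponding value bit is uninitialized.

#include <cassert>
#include <cstdint>

// Shadow of (a & b): a result bit stays undefined unless some operand pins
// it with a defined 0 bit -- S = (Sa & Sb) | (Va & Sb) | (Sa & Vb).
uint8_t andShadow(uint8_t Va, uint8_t Sa, uint8_t Vb, uint8_t Sb) {
  return (Sa & Sb) | (Va & Sb) | (Sa & Vb);
}

// Shadow of (a | b): a defined 1 bit pins the result, so ~V takes V's place.
uint8_t orShadow(uint8_t Va, uint8_t Sa, uint8_t Vb, uint8_t Sb) {
  return (Sa & Sb) | (~Va & Sb) | (Sa & ~Vb);
}

int main() {
  // A fully defined 0 ANDed with a fully poisoned value is a defined 0.
  assert(andShadow(0x00, 0x00, 0xAB, 0xFF) == 0x00);
  // a = 0xF0 fully defined, b fully poisoned: a's defined 1s mask the poison
  // in the high nibble; the poison survives only where a has defined 0s.
  assert(orShadow(0xF0, 0x00, 0x00, 0xFF) == 0x0F);
  return 0;
}
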
1474
1475 /// \brief Default propagation of shadow and/or origin.
1476 ///
1477 /// This class implements the general case of shadow propagation, used in all
1478 /// cases where we don't know and/or don't care about what the operation
1479 /// actually does. It converts all input shadow values to a common type
1480 /// (extending or truncating as necessary), and bitwise OR's them.
1481 ///
1482 /// This is much cheaper than inserting checks (i.e. requiring inputs to be
1483 /// fully initialized), and less prone to false positives.
1484 ///
1485 /// This class also implements the general case of origin propagation. For a
1486 /// Nary operation, result origin is set to the origin of an argument that is
1487 /// not entirely initialized. If there is more than one such argument, the
1488 /// rightmost of them is picked. It does not matter which one is picked if all
1489 /// arguments are initialized.
1490 template <bool CombineShadow>
1491 class Combiner {
1492 Value *Shadow;
1493 Value *Origin;
1494 IRBuilder<> &IRB;
1495 MemorySanitizerVisitor *MSV;
1496
1497 public:
1498 Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
1499 Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
1500
1501 /// \brief Add a pair of shadow and origin values to the mix.
1502 Combiner &Add(Value *OpShadow, Value *OpOrigin) {
1503 if (CombineShadow) {
1504 assert(OpShadow);
1505 if (!Shadow)
1506 Shadow = OpShadow;
1507 else {
1508 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
1509 Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
1510 }
1511 }
1512
1513 if (MSV->MS.TrackOrigins) {
1514 assert(OpOrigin);
1515 if (!Origin) {
1516 Origin = OpOrigin;
1517 } else {
1518 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
1519 // No point in adding something that might result in 0 origin value.
1520 if (!ConstOrigin || !ConstOrigin->isNullValue()) {
1521 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
1522 Value *Cond =
1523 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
1524 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1525 }
1526 }
1527 }
1528 return *this;
1529 }
1530
1531 /// \brief Add an application value to the mix.
1532 Combiner &Add(Value *V) {
1533 Value *OpShadow = MSV->getShadow(V);
1534 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
1535 return Add(OpShadow, OpOrigin);
1536 }
1537
1538 /// \brief Set the current combined values as the given instruction's shadow
1539 /// and origin.
1540 void Done(Instruction *I) {
1541 if (CombineShadow) {
1542 assert(Shadow);
1543 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
1544 MSV->setShadow(I, Shadow);
1545 }
1546 if (MSV->MS.TrackOrigins) {
1547 assert(Origin);
1548 MSV->setOrigin(I, Origin);
1549 }
1550 }
1551 };
1552
1553 typedef Combiner<true> ShadowAndOriginCombiner;
1554 typedef Combiner<false> OriginCombiner;
1555
1556 /// \brief Propagate origin for arbitrary operation.
1557 void setOriginForNaryOp(Instruction &I) {
1558 if (!MS.TrackOrigins) return;
1559 IRBuilder<> IRB(&I);
1560 OriginCombiner OC(this, IRB);
1561 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1562 OC.Add(OI->get());
1563 OC.Done(&I);
1564 }
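
For reference, a standalone sketch (plain C++, invented names, not the LLVM API) of how the Combiner's chain of selects above ends up reporting the origin of the rightmost operand whose shadow is non-zero:

#include <cassert>
#include <cstdint>

// Shadows[i]/Origins[i] describe the i-th operand, left to right.
uint32_t combineOrigins(const uint32_t Shadows[], const uint32_t Origins[],
                        int N) {
  uint32_t Origin = Origins[0];
  for (int i = 1; i < N; ++i)
    // Mirrors IRB.CreateSelect(OpShadow != clean, OpOrigin, Origin) above.
    Origin = Shadows[i] != 0 ? Origins[i] : Origin;
  return Origin;
}

int main() {
  uint32_t S[] = {0x00, 0xFF, 0x01};  // operands 1 and 2 are partly poisoned
  uint32_t O[] = {111, 222, 333};     // their origin ids
  assert(combineOrigins(S, O, 3) == 333);  // rightmost dirty operand wins
  return 0;
}
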
1565
1566 size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
1567 assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
1568 "Vector of pointers is not a valid shadow type");
1569 return Ty->isVectorTy() ?
1570 Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
1571 Ty->getPrimitiveSizeInBits();
1572 }
1573
1574 /// \brief Cast between two shadow types, extending or truncating as
1575 /// necessary.
1576 Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
1577 bool Signed = false) {
1578 Type *srcTy = V->getType();
1579 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
1580 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
1581 if (srcSizeInBits > 1 && dstSizeInBits == 1)
1582 return IRB.CreateICmpNE(V, getCleanShadow(V));
1583
1584 if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
1585 return IRB.CreateIntCast(V, dstTy, Signed);
1586 if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
1587 dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
1588 return IRB.CreateIntCast(V, dstTy, Signed);
1589 Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
1590 Value *V2 =
1591 IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
1592 return IRB.CreateBitCast(V2, dstTy);
1593 // TODO: handle struct types.
1594 }
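
A standalone sketch (plain C++, invented name) of the wide-to-i1 special case at the top of CreateShadowCast: narrowing to a single shadow bit must not drop poison, so it is an inequality test against the clean shadow rather than a truncation.

#include <cassert>
#include <cstdint>

bool castShadowToI1(uint32_t S) {
  return S != 0;   // mirrors IRB.CreateICmpNE(V, getCleanShadow(V))
}

int main() {
  assert(!castShadowToI1(0x00000000));  // fully defined stays defined
  assert(castShadowToI1(0x00F00000));   // a plain trunc to i1 would drop this
  return 0;
}
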
1595
1596 /// \brief Cast an application value to the type of its own shadow.
1597 Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
1598 Type *ShadowTy = getShadowTy(V);
1599 if (V->getType() == ShadowTy)
1600 return V;
1601 if (V->getType()->isPtrOrPtrVectorTy())
1602 return IRB.CreatePtrToInt(V, ShadowTy);
1603 else
1604 return IRB.CreateBitCast(V, ShadowTy);
1605 }
1606
1607 /// \brief Propagate shadow for arbitrary operation.
1608 void handleShadowOr(Instruction &I) {
1609 IRBuilder<> IRB(&I);
1610 ShadowAndOriginCombiner SC(this, IRB);
1611 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1612 SC.Add(OI->get());
1613 SC.Done(&I);
1614 }
1615
1616 // \brief Handle multiplication by constant.
1617 //
1618 // Handle a special case of multiplication by constant that may have one or
1619 // more zeros in the lower bits. This makes the corresponding number of lower bits
1620 // of the result zero as well. We model it by shifting the other operand
1621 // shadow left by the required number of bits. Effectively, we transform
1622 // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
1623 // We use multiplication by 2**N instead of shift to cover the case of
1624 // multiplication by 0, which may occur in some elements of a vector operand.
1625 void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
1626 Value *OtherArg) {
1627 Constant *ShadowMul;
1628 Type *Ty = ConstArg->getType();
1629 if (Ty->isVectorTy()) {
1630 unsigned NumElements = Ty->getVectorNumElements();
1631 Type *EltTy = Ty->getSequentialElementType();
1632 SmallVector<Constant *, 16> Elements;
1633 for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
1634 if (ConstantInt *Elt =
1635 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
1636 const APInt &V = Elt->getValue();
1637 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1638 Elements.push_back(ConstantInt::get(EltTy, V2));
1639 } else {
1640 Elements.push_back(ConstantInt::get(EltTy, 1));
1641 }
1642 }
1643 ShadowMul = ConstantVector::get(Elements);
1644 } else {
1645 if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
1646 const APInt &V = Elt->getValue();
1647 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1648 ShadowMul = ConstantInt::get(Ty, V2);
1649 } else {
1650 ShadowMul = ConstantInt::get(Ty, 1);
1651 }
1652 }
1653
1654 IRBuilder<> IRB(&I);
1655 setShadow(&I,
1656 IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
1657 setOrigin(&I, getOrigin(OtherArg));
1658 }
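
To make the trailing-zeros argument behind handleMulByConstant concrete, here is a standalone sketch in plain C++ (the helper names are invented for the example): with C = A * 2**B, the low B bits of X*C are zero for any X, so the same bits of the shadow can be cleared.

#include <cassert>
#include <cstdint>

// 2**countTrailingZeros(C); 0 for C == 0, which makes the result fully defined.
uint32_t shadowMultiplier(uint32_t C) {
  if (C == 0)
    return 0;
  uint32_t Mul = 1;
  while ((C & 1u) == 0) {
    Mul <<= 1;
    C >>= 1;
  }
  return Mul;
}

// Shadow of X*C computed from the shadow of X alone.
uint32_t mulByConstShadow(uint32_t Sx, uint32_t C) {
  return Sx * shadowMultiplier(C);
}

int main() {
  // X * 24 == X * (3 * 2**3): the three low result bits are always zero,
  // hence defined, so the three low shadow bits are cleared.
  assert(mulByConstShadow(0xFFFFFFFFu, 24) == 0xFFFFFFF8u);
  // Multiplying by 0 yields a constant, fully defined result.
  assert(mulByConstShadow(0xFFFFFFFFu, 0) == 0);
  return 0;
}
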
1659
1660 void visitMul(BinaryOperator &I) {
1661 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1662 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1663 if (constOp0 && !constOp1)
1664 handleMulByConstant(I, constOp0, I.getOperand(1));
1665 else if (constOp1 && !constOp0)
1666 handleMulByConstant(I, constOp1, I.getOperand(0));
1667 else
1668 handleShadowOr(I);
1669 }
1670
1671 void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
1672 void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
1673 void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
1674 void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
1675 void visitSub(BinaryOperator &I) { handleShadowOr(I); }
1676 void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1677
1678 void handleDiv(Instruction &I) {
1679 IRBuilder<> IRB(&I);
1680 // Strict on the second argument.
1681 insertShadowCheck(I.getOperand(1), &I);
1682 setShadow(&I, getShadow(&I, 0));
1683 setOrigin(&I, getOrigin(&I, 0));
1684 }
1685
1686 void visitUDiv(BinaryOperator &I) { handleDiv(I); }
1687 void visitSDiv(BinaryOperator &I) { handleDiv(I); }
1688 void visitFDiv(BinaryOperator &I) { handleDiv(I); }
1689 void visitURem(BinaryOperator &I) { handleDiv(I); }
1690 void visitSRem(BinaryOperator &I) { handleDiv(I); }
1691 void visitFRem(BinaryOperator &I) { handleDiv(I); }
1692
1693 /// \brief Instrument == and != comparisons.
1694 ///
1695 /// Sometimes the comparison result is known even if some of the bits of the
1696 /// arguments are not.
1697 void handleEqualityComparison(ICmpInst &I) {
1698 IRBuilder<> IRB(&I);
1699 Value *A = I.getOperand(0);
1700 Value *B = I.getOperand(1);
1701 Value *Sa = getShadow(A);
1702 Value *Sb = getShadow(B);
1703
1704 // Get rid of pointers and vectors of pointers.
1705 // For ints (and vectors of ints), types of A and Sa match,
1706 // and this is a no-op.
1707 A = IRB.CreatePointerCast(A, Sa->getType());
1708 B = IRB.CreatePointerCast(B, Sb->getType());
1709
1710 // A == B <==> (C = A^B) == 0
1711 // A != B <==> (C = A^B) != 0
1712 // Sc = Sa | Sb
1713 Value *C = IRB.CreateXor(A, B);
1714 Value *Sc = IRB.CreateOr(Sa, Sb);
1715 // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
1716 // Result is defined if one of the following is true
1717 // * there is a defined 1 bit in C
1718 // * C is fully defined
1719 // Si = !(C & ~Sc) && Sc
1720 Value *Zero = Constant::getNullValue(Sc->getType());
1721 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1722 Value *Si =
1723 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1724 IRB.CreateICmpEQ(
1725 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1726 Si->setName("_msprop_icmp");
1727 setShadow(&I, Si);
1728 setOriginForNaryOp(I);
1729 }
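
The definedness rule built above for A == B, restated as a standalone plain-C++ sketch on 8-bit values (the function name is invented for the example):

#include <cassert>
#include <cstdint>

bool eqResultPoisoned(uint8_t A, uint8_t Sa, uint8_t B, uint8_t Sb) {
  uint8_t C = A ^ B;
  uint8_t Sc = Sa | Sb;
  // Defined if C has a defined 1 bit (values provably differ) or C is fully
  // defined; this is the complement of Si computed above.
  bool Defined = ((C & ~Sc) != 0) || (Sc == 0);
  return !Defined;
}

int main() {
  // Bit 7 provably differs and is defined in both operands, so A == B is
  // false no matter what the poisoned low bits hold: the result is defined.
  assert(!eqResultPoisoned(0x80, 0x0F, 0x00, 0x0F));
  // Operands agree on every defined bit but some bits are poisoned: the
  // outcome depends on uninitialized data, so the result is poisoned.
  assert(eqResultPoisoned(0x00, 0x0F, 0x00, 0x0F));
  return 0;
}
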
1730
1731 /// \brief Build the lowest possible value of V, taking into account V's
1732 /// uninitialized bits.
1733 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1734 bool isSigned) {
1735 if (isSigned) {
1736 // Split shadow into sign bit and other bits.
1737 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1738 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1739 // Maximize the undefined shadow bit, minimize other undefined bits.
1740 return
1741 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1742 } else {
1743 // Minimize undefined bits.
1744 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1745 }
1746 }
1747
1748 /// \brief Build the highest possible value of V, taking into account V's
1749 /// uninitialized bits.
1750 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1751 bool isSigned) {
1752 if (isSigned) {
1753 // Split shadow into sign bit and other bits.
1754 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1755 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1756 // Minimize the undefined shadow bit, maximize other undefined bits.
1757 return
1758 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1759 } else {
1760 // Maximize undefined bits.
1761 return IRB.CreateOr(A, Sa);
1762 }
1763 }
1764
1765 /// \brief Instrument relational comparisons.
1766 ///
1767 /// This function does exact shadow propagation for all relational
1768 /// comparisons of integers, pointers and vectors of those.
1769 /// FIXME: output seems suboptimal when one of the operands is a constant
1770 void handleRelationalComparisonExact(ICmpInst &I) {
1771 IRBuilder<> IRB(&I);
1772 Value *A = I.getOperand(0);
1773 Value *B = I.getOperand(1);
1774 Value *Sa = getShadow(A);
1775 Value *Sb = getShadow(B);
1776
1777 // Get rid of pointers and vectors of pointers.
1778 // For ints (and vectors of ints), types of A and Sa match,
1779 // and this is a no-op.
1780 A = IRB.CreatePointerCast(A, Sa->getType());
1781 B = IRB.CreatePointerCast(B, Sb->getType());
1782
1783 // Let [a0, a1] be the interval of possible values of A, taking into account
1784 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1785 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
1786 bool IsSigned = I.isSigned();
1787 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1788 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1789 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1790 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1791 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1792 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1793 Value *Si = IRB.CreateXor(S1, S2);
1794 setShadow(&I, Si);
1795 setOriginForNaryOp(I);
1796 }
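
A standalone sketch (plain C++, invented name) of the interval rule above for an unsigned A < B on 8-bit values; the result is defined exactly when both interval extremes give the same answer:

#include <cassert>
#include <cstdint>

bool ultResultPoisoned(uint8_t A, uint8_t Sa, uint8_t B, uint8_t Sb) {
  uint8_t a0 = A & ~Sa, a1 = A | Sa;  // lowest/highest possible A (unsigned)
  uint8_t b0 = B & ~Sb, b1 = B | Sb;  // lowest/highest possible B (unsigned)
  bool S1 = a0 < b1;                  // most favourable outcome for A < B
  bool S2 = a1 < b0;                  // least favourable outcome for A < B
  return S1 != S2;                    // shadow = xor of the two, as above
}

int main() {
  // A can be at most 0x0F, B is at least 0x40: A < B holds for every
  // possible pair of values, so the comparison result is defined.
  assert(!ultResultPoisoned(0x05, 0x0A, 0x40, 0x0F));
  // Intervals overlap: the outcome depends on the poisoned bits.
  assert(ultResultPoisoned(0x05, 0xF0, 0x40, 0x0F));
  return 0;
}
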
1797
1798 /// \brief Instrument signed relational comparisons.
1799 ///
1800 /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
1801 /// bit of the shadow. Everything else is delegated to handleShadowOr().
1802 void handleSignedRelationalComparison(ICmpInst &I) {
1803 Constant *constOp;
1804 Value *op = nullptr;
1805 CmpInst::Predicate pre;
1806 if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
1807 op = I.getOperand(0);
1808 pre = I.getPredicate();
1809 } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
1810 op = I.getOperand(1);
1811 pre = I.getSwappedPredicate();
1812 } else {
1813 handleShadowOr(I);
1814 return;
1815 }
1816
1817 if ((constOp->isNullValue() &&
1818 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
1819 (constOp->isAllOnesValue() &&
1820 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
1821 IRBuilder<> IRB(&I);
1822 Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
1823 "_msprop_icmp_s");
1824 setShadow(&I, Shadow);
1825 setOrigin(&I, getOrigin(op));
1826 } else {
1827 handleShadowOr(I);
1828 }
1829 }
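
A standalone sketch (plain C++, invented name) of the sign-bit case above: for x < 0 only the sign bit of x matters, so the result is poisoned exactly when the sign bit of the shadow is set.

#include <cassert>
#include <cstdint>

bool sltZeroResultPoisoned(int32_t Sx) {
  return Sx < 0;   // mirrors IRB.CreateICmpSLT(getShadow(op), clean shadow)
}

int main() {
  // All bits but the sign bit poisoned: x < 0 is still fully determined.
  assert(!sltZeroResultPoisoned(0x7FFFFFFF));
  // Sign bit poisoned: the outcome depends on uninitialized data.
  assert(sltZeroResultPoisoned(INT32_MIN));
  return 0;
}
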
1830
1831 void visitICmpInst(ICmpInst &I) {
1832 if (!ClHandleICmp) {
1833 handleShadowOr(I);
1834 return;
1835 }
1836 if (I.isEquality()) {
1837 handleEqualityComparison(I);
1838 return;
1839 }
1840
1841 assert(I.isRelational());
1842 if (ClHandleICmpExact) {
1843 handleRelationalComparisonExact(I);
1844 return;
1845 }
1846 if (I.isSigned()) {
1847 handleSignedRelationalComparison(I);
1848 return;
1849 }
1850
1851 assert(I.isUnsigned());
1852 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1853 handleRelationalComparisonExact(I);
1854 return;
1855 }
1856
1857 handleShadowOr(I);
1858 }
1859
1860 void visitFCmpInst(FCmpInst &I) {
1861 handleShadowOr(I);
1862 }
1863
1864 void handleShift(BinaryOperator &I) {
1865 IRBuilder<> IRB(&I);
1866 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1867 // Otherwise perform the same shift on S1.
1868 Value *S1 = getShadow(&I, 0);
1869 Value *S2 = getShadow(&I, 1);
1870 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1871 S2->getType());
1872 Value *V2 = I.getOperand(1);
1873 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1874 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1875 setOriginForNaryOp(I);
1876 }
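
A standalone sketch (plain C++, invented name) of the shift rule above on 8-bit values:

#include <cassert>
#include <cstdint>

uint8_t shlShadow(uint8_t S1, uint8_t V2, uint8_t S2) {
  uint8_t S2Conv = S2 != 0 ? 0xFF : 0x00;         // sext(icmp ne S2, clean)
  return static_cast<uint8_t>(S1 << V2) | S2Conv; // same shift applied to S1
}

int main() {
  // Defined shift amount: the operand's poisoned bits simply move left.
  assert(shlShadow(0x01, 4, 0x00) == 0x10);
  // Poisoned shift amount: the whole result is poisoned.
  assert(shlShadow(0x00, 4, 0x01) == 0xFF);
  return 0;
}
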
1877
1878 void visitShl(BinaryOperator &I) { handleShift(I); }
1879 void visitAShr(BinaryOperator &I) { handleShift(I); }
1880 void visitLShr(BinaryOperator &I) { handleShift(I); }
1881
1882 /// \brief Instrument llvm.memmove
1883 ///
1884 /// At this point we don't know if llvm.memmove will be inlined or not.
1885 /// If we don't instrument it and it gets inlined,
1886 /// our interceptor will not kick in and we will lose the memmove.
1887 /// If we instrument the call here, but it does not get inlined,
1888 /// we will memmove the shadow twice, which is bad in the case
1889 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1890 ///
1891 /// Similar situation exists for memcpy and memset.
1892 void visitMemMoveInst(MemMoveInst &I) {
1893 IRBuilder<> IRB(&I);
1894 IRB.CreateCall(
1895 MS.MemmoveFn,
1896 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1897 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1898 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1899 I.eraseFromParent();
1900 }
1901
1902 // Similar to memmove: avoid copying shadow twice.
1903 // This is somewhat unfortunate as it may slow down small constant memcpys.
1904 // FIXME: consider doing manual inline for small constant sizes and proper
1905 // alignment.
1906 void visitMemCpyInst(MemCpyInst &I) {
1907 IRBuilder<> IRB(&I);
1908 IRB.CreateCall(
1909 MS.MemcpyFn,
1910 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1911 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1912 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1913 I.eraseFromParent();
1914 }
1915
1916 // Same as memcpy.
1917 void visitMemSetInst(MemSetInst &I) {
1918 IRBuilder<> IRB(&I);
1919 IRB.CreateCall(
1920 MS.MemsetFn,
1921 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1922 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1923 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1924 I.eraseFromParent();
1925 }
1926
1927 void visitVAStartInst(VAStartInst &I) {
1928 VAHelper->visitVAStartInst(I);
1929 }
1930
1931 void visitVACopyInst(VACopyInst &I) {
1932 VAHelper->visitVACopyInst(I);
1933 }
1934
1935 /// \brief Handle vector store-like intrinsics.
1936 ///
1937 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1938 /// has 1 pointer argument and 1 vector argument, returns void.
1939 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1940 IRBuilder<> IRB(&I);
1941 Value* Addr = I.getArgOperand(0);
1942 Value *Shadow = getShadow(&I, 1);
1943 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1944
1945 // We don't know the pointer alignment (could be unaligned SSE store!).
1946 // Have to assume the worst case.
1947 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1948
1949 if (ClCheckAccessAddress)
1950 insertShadowCheck(Addr, &I);
1951
1952 // FIXME: factor out common code from materializeStores
1953 if (MS.TrackOrigins)
1954 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
1955 return true;
1956 }
1957
1958 /// \brief Handle vector load-like intrinsics.
1959 ///
1960 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1961 /// has 1 pointer argument, returns a vector.
1962 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1963 IRBuilder<> IRB(&I);
1964 Value *Addr = I.getArgOperand(0);
1965
1966 Type *ShadowTy = getShadowTy(&I);
1967 if (PropagateShadow) {
1968 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1969 // We don't know the pointer alignment (could be unaligned SSE load!).
1970 // Have to assume the worst case.
1971 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1972 } else {
1973 setShadow(&I, getCleanShadow(&I));
1974 }
1975
1976 if (ClCheckAccessAddress)
1977 insertShadowCheck(Addr, &I);
1978
1979 if (MS.TrackOrigins) {
1980 if (PropagateShadow)
1981 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
1982 else
1983 setOrigin(&I, getCleanOrigin());
1984 }
1985 return true;
1986 }
1987
1988 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1989 ///
1990 /// Instrument intrinsics with any number of arguments of the same type,
1991 /// equal to the return type. The type should be simple (no aggregates or
1992 /// pointers; vectors are fine).
1993 /// Caller guarantees that this intrinsic does not access memory.
1994 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1995 Type *RetTy = I.getType();
1996 if (!(RetTy->isIntOrIntVectorTy() ||
1997 RetTy->isFPOrFPVectorTy() ||
1998 RetTy->isX86_MMXTy()))
1999 return false;
2000
2001 unsigned NumArgOperands = I.getNumArgOperands();
2002
2003 for (unsigned i = 0; i < NumArgOperands; ++i) {
2004 Type *Ty = I.getArgOperand(i)->getType();
2005 if (Ty != RetTy)
2006 return false;
2007 }
2008
2009 IRBuilder<> IRB(&I);
2010 ShadowAndOriginCombiner SC(this, IRB);
2011 for (unsigned i = 0; i < NumArgOperands; ++i)
2012 SC.Add(I.getArgOperand(i));
2013 SC.Done(&I);
2014
2015 return true;
2016 }
2017
2018 /// \brief Heuristically instrument unknown intrinsics.
2019 ///
2020 /// The main purpose of this code is to do something reasonable with all
2021 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2022 /// We recognize several classes of intrinsics by their argument types and
2023 /// ModRefBehaviour and apply special instrumentation when we are reasonably
2024 /// sure that we know what the intrinsic does.
2025 ///
2026 /// We special-case intrinsics where this approach fails. See llvm.bswap
2027 /// handling as an example of that.
2028 bool handleUnknownIntrinsic(IntrinsicInst &I) {
2029 unsigned NumArgOperands = I.getNumArgOperands();
2030 if (NumArgOperands == 0)
2031 return false;
2032
2033 if (NumArgOperands == 2 &&
2034 I.getArgOperand(0)->getType()->isPointerTy() &&
2035 I.getArgOperand(1)->getType()->isVectorTy() &&
2036 I.getType()->isVoidTy() &&
2037 !I.onlyReadsMemory()) {
2038 // This looks like a vector store.
2039 return handleVectorStoreIntrinsic(I);
2040 }
2041
2042 if (NumArgOperands == 1 &&
2043 I.getArgOperand(0)->getType()->isPointerTy() &&
2044 I.getType()->isVectorTy() &&
2045 I.onlyReadsMemory()) {
2046 // This looks like a vector load.
2047 return handleVectorLoadIntrinsic(I);
2048 }
2049
2050 if (I.doesNotAccessMemory())
2051 if (maybeHandleSimpleNomemIntrinsic(I))
2052 return true;
2053
2054 // FIXME: detect and handle SSE maskstore/maskload
2055 return false;
2056 }
2057
2058 void handleBswap(IntrinsicInst &I) {
2059 IRBuilder<> IRB(&I);
2060 Value *Op = I.getArgOperand(0);
2061 Type *OpType = Op->getType();
2062 Function *BswapFunc = Intrinsic::getDeclaration(
2063 F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2064 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2065 setOrigin(&I, getOrigin(Op));
2066 }
2067
2068 // \brief Instrument vector convert intrinsic.
2069 //
2070 // This function instruments intrinsics like cvtsi2ss:
2071 // %Out = int_xxx_cvtyyy(%ConvertOp)
2072 // or
2073 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2074 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2075 // number of \p Out elements, and (if it has 2 arguments) copies the rest of the
2076 // elements from \p CopyOp.
2077 // In most cases conversion involves a floating-point value which may trigger a
2078 // hardware exception when not fully initialized. For this reason we require
2079 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2080 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2081 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2082 // return a fully initialized value.
2083 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2084 IRBuilder<> IRB(&I);
2085 Value *CopyOp, *ConvertOp;
2086
2087 switch (I.getNumArgOperands()) {
2088 case 3:
2089 assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2090 case 2:
2091 CopyOp = I.getArgOperand(0);
2092 ConvertOp = I.getArgOperand(1);
2093 break;
2094 case 1:
2095 ConvertOp = I.getArgOperand(0);
2096 CopyOp = nullptr;
2097 break;
2098 default:
2099 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2100 }
2101
2102 // The first *NumUsedElements* elements of ConvertOp are converted to the
2103 // same number of output elements. The rest of the output is copied from
2104 // CopyOp, or (if not available) filled with zeroes.
2105 // Combine shadow for elements of ConvertOp that are used in this operation,
2106 // and insert a check.
2107 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2108 // int->any conversion.
2109 Value *ConvertShadow = getShadow(ConvertOp);
2110 Value *AggShadow = nullptr;
2111 if (ConvertOp->getType()->isVectorTy()) {
2112 AggShadow = IRB.CreateExtractElement(
2113 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2114 for (int i = 1; i < NumUsedElements; ++i) {
2115 Value *MoreShadow = IRB.CreateExtractElement(
2116 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2117 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2118 }
2119 } else {
2120 AggShadow = ConvertShadow;
2121 }
2122 assert(AggShadow->getType()->isIntegerTy());
2123 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2124
2125 // Build result shadow by zero-filling parts of CopyOp shadow that come from
2126 // ConvertOp.
2127 if (CopyOp) {
2128 assert(CopyOp->getType() == I.getType());
2129 assert(CopyOp->getType()->isVectorTy());
2130 Value *ResultShadow = getShadow(CopyOp);
2131 Type *EltTy = ResultShadow->getType()->getVectorElementType();
2132 for (int i = 0; i < NumUsedElements; ++i) {
2133 ResultShadow = IRB.CreateInsertElement(
2134 ResultShadow, ConstantInt::getNullValue(EltTy),
2135 ConstantInt::get(IRB.getInt32Ty(), i));
2136 }
2137 setShadow(&I, ResultShadow);
2138 setOrigin(&I, getOrigin(CopyOp));
2139 } else {
2140 setShadow(&I, getCleanShadow(&I));
2141 setOrigin(&I, getCleanOrigin());
2142 }
2143 }
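
A standalone sketch (plain C++, invented names) of the result-shadow rule above for the two-operand form with NumUsedElements == 1: the converted lane is checked and reported rather than propagated, so its slot in the result shadow is forced clean.

#include <array>
#include <cassert>
#include <cstdint>

std::array<uint32_t, 4> cvtResultShadow(std::array<uint32_t, 4> CopyShadow,
                                        int NumUsedElements) {
  for (int i = 0; i < NumUsedElements; ++i)
    CopyShadow[i] = 0;   // converted lanes were checked, not propagated
  return CopyShadow;
}

int main() {
  std::array<uint32_t, 4> S = {0xFF, 0x00, 0x00, 0x01};
  std::array<uint32_t, 4> R = cvtResultShadow(S, 1);
  assert(R[0] == 0x00 && R[3] == 0x01);  // lane 0 forced clean, lane 3 kept
  return 0;
}
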
2144
2145 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2146 // zeroes if it is zero, and all ones otherwise.
2147 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2148 if (S->getType()->isVectorTy())
2149 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2150 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2151 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2152 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2153 }
2154
2155 // Given a vector, extract its first element, and return all
2156 // zeroes if it is zero, and all ones otherwise.
2157 Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2158 Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2159 Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2160 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2161 }
2162
2163 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2164 Type *T = S->getType();
2165 assert(T->isVectorTy());
2166 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2167 return IRB.CreateSExt(S2, T);
2168 }
2169
2170 // \brief Instrument vector shift intrinsic.
2171 //
2172 // This function instruments intrinsics like int_x86_avx2_psll_w.
2173 // Intrinsic shifts %In by %ShiftSize bits.
2174 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2175 // size, and the rest is ignored. Behavior is defined even if shift size is
2176 // greater than register (or field) width.
2177 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2178 assert(I.getNumArgOperands() == 2);
2179 IRBuilder<> IRB(&I);
2180 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2181 // Otherwise perform the same shift on S1.
2182 Value *S1 = getShadow(&I, 0);
2183 Value *S2 = getShadow(&I, 1);
2184 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2185 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2186 Value *V1 = I.getOperand(0);
2187 Value *V2 = I.getOperand(1);
2188 Value *Shift = IRB.CreateCall(I.getCalledValue(),
2189 {IRB.CreateBitCast(S1, V1->getType()), V2});
2190 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2191 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2192 setOriginForNaryOp(I);
2193 }
2194
2195 // \brief Get an X86_MMX-sized vector type.
2196 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2197 const unsigned X86_MMXSizeInBits = 64;
2198 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2199 X86_MMXSizeInBits / EltSizeInBits);
2200 }
2201
2202 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2203 // intrinsic.
2204 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2205 switch (id) {
2206 case llvm::Intrinsic::x86_sse2_packsswb_128:
2207 case llvm::Intrinsic::x86_sse2_packuswb_128:
2208 return llvm::Intrinsic::x86_sse2_packsswb_128;
2209
2210 case llvm::Intrinsic::x86_sse2_packssdw_128:
2211 case llvm::Intrinsic::x86_sse41_packusdw:
2212 return llvm::Intrinsic::x86_sse2_packssdw_128;
2213
2214 case llvm::Intrinsic::x86_avx2_packsswb:
2215 case llvm::Intrinsic::x86_avx2_packuswb:
2216 return llvm::Intrinsic::x86_avx2_packsswb;
2217
2218 case llvm::Intrinsic::x86_avx2_packssdw:
2219 case llvm::Intrinsic::x86_avx2_packusdw:
2220 return llvm::Intrinsic::x86_avx2_packssdw;
2221
2222 case llvm::Intrinsic::x86_mmx_packsswb:
2223 case llvm::Intrinsic::x86_mmx_packuswb:
2224 return llvm::Intrinsic::x86_mmx_packsswb;
2225
2226 case llvm::Intrinsic::x86_mmx_packssdw:
2227 return llvm::Intrinsic::x86_mmx_packssdw;
2228 default:
2229 llvm_unreachable("unexpected intrinsic id");
2230 }
2231 }
2232
2233 // \brief Instrument vector pack intrinsic.
2234 //
2235 // This function instruments intrinsics like x86_mmx_packsswb, that
2236 // packs elements of 2 input vectors into half as many bits with saturation.
2237 // Shadow is propagated with the signed variant of the same intrinsic applied
2238 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2239 // EltSizeInBits is used only for x86mmx arguments.
2240 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2241 assert(I.getNumArgOperands() == 2);
2242 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2243 IRBuilder<> IRB(&I);
2244 Value *S1 = getShadow(&I, 0);
2245 Value *S2 = getShadow(&I, 1);
2246 assert(isX86_MMX || S1->getType()->isVectorTy());
2247
2248 // SExt and ICmpNE below must apply to individual elements of input vectors.
2249 // In case of x86mmx arguments, cast them to appropriate vector types and
2250 // back.
2251 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2252 if (isX86_MMX) {
2253 S1 = IRB.CreateBitCast(S1, T);
2254 S2 = IRB.CreateBitCast(S2, T);
2255 }
2256 Value *S1_ext = IRB.CreateSExt(
2257 IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2258 Value *S2_ext = IRB.CreateSExt(
2259 IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2260 if (isX86_MMX) {
2261 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2262 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2263 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2264 }
2265
2266 Function *ShadowFn = Intrinsic::getDeclaration(
2267 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2268
2269 Value *S =
2270 IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2271 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2272 setShadow(&I, S);
2273 setOriginForNaryOp(I);
2274 }
2275
2276 // \brief Instrument sum-of-absolute-differences intrinsic.
2277 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2278 const unsigned SignificantBitsPerResultElement = 16;
2279 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2280 Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2281 unsigned ZeroBitsPerResultElement =
2282 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2283
2284 IRBuilder<> IRB(&I);
2285 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2286 S = IRB.CreateBitCast(S, ResTy);
2287 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2288 ResTy);
2289 S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2290 S = IRB.CreateBitCast(S, getShadowTy(&I));
2291 setShadow(&I, S);
2292 setOriginForNaryOp(I);
2293 }
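
A standalone sketch (plain C++, invented name) of the per-lane rule above for psadbw: each 64-bit lane holds a sum of absolute byte differences that fits in 16 bits, so the upper 48 bits are always zero and therefore always defined.

#include <cassert>
#include <cstdint>

uint64_t sadLaneShadow(uint64_t Sa, uint64_t Sb) {
  uint64_t S = Sa | Sb;                   // OR the operand shadows
  uint64_t Lane = S != 0 ? ~0ULL : 0ULL;  // sext(icmp ne S, clean)
  return Lane >> (64 - 16);               // upper 48 result bits are known zero
}

int main() {
  assert(sadLaneShadow(0, 0) == 0);           // clean inputs, clean result
  assert(sadLaneShadow(0x80, 0) == 0xFFFF);   // any poison poisons the 16 live bits
  return 0;
}
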
2294
2295 // \brief Instrument multiply-add intrinsic.
2296 void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2297 unsigned EltSizeInBits = 0) {
2298 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2299 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2300 IRBuilder<> IRB(&I);
2301 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2302 S = IRB.CreateBitCast(S, ResTy);
2303 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2304 ResTy);
2305 S = IRB.CreateBitCast(S, getShadowTy(&I));
2306 setShadow(&I, S);
2307 setOriginForNaryOp(I);
2308 }
2309
2310 // \brief Instrument compare-packed intrinsic.
2311 // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
2312 // all-ones shadow.
2313 void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2314 IRBuilder<> IRB(&I);
2315 Type *ResTy = getShadowTy(&I);
2316 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2317 Value *S = IRB.CreateSExt(
2318 IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2319 setShadow(&I, S);
2320 setOriginForNaryOp(I);
2321 }
2322
2323 // \brief Instrument compare-scalar intrinsic.
2324 // This handles both cmp* intrinsics which return the result in the first
2325 // element of a vector, and comi* which return the result as i32.
2326 void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2327 IRBuilder<> IRB(&I);
2328 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2329 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2330 setShadow(&I, S);
2331 setOriginForNaryOp(I);
2332 }
2333
2334 void handleStmxcsr(IntrinsicInst &I) {
2335 IRBuilder<> IRB(&I);
2336 Value* Addr = I.getArgOperand(0);
2337 Type *Ty = IRB.getInt32Ty();
2338 Value *ShadowPtr = getShadowPtr(Addr, Ty, IRB);
2339
2340 IRB.CreateStore(getCleanShadow(Ty),
2341 IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
2342
2343 if (ClCheckAccessAddress)
2344 insertShadowCheck(Addr, &I);
2345 }
2346
2347 void handleLdmxcsr(IntrinsicInst &I) {
2348 if (!InsertChecks) return;
2349
2350 IRBuilder<> IRB(&I);
2351 Value *Addr = I.getArgOperand(0);
2352 Type *Ty = IRB.getInt32Ty();
2353 unsigned Alignment = 1;
2354
2355 if (ClCheckAccessAddress)
2356 insertShadowCheck(Addr, &I);
2357
2358 Value *Shadow = IRB.CreateAlignedLoad(getShadowPtr(Addr, Ty, IRB),
2359 Alignment, "_ldmxcsr");
2360 Value *Origin = MS.TrackOrigins
2361 ? IRB.CreateLoad(getOriginPtr(Addr, IRB, Alignment))
2362 : getCleanOrigin();
2363 insertShadowCheck(Shadow, Origin, &I);
2364 }
2365
2366 void visitIntrinsicInst(IntrinsicInst &I) {
2367 switch (I.getIntrinsicID()) {
2368 case llvm::Intrinsic::bswap:
2369 handleBswap(I);
2370 break;
2371 case llvm::Intrinsic::x86_sse_stmxcsr:
2372 handleStmxcsr(I);
2373 break;
2374 case llvm::Intrinsic::x86_sse_ldmxcsr:
2375 handleLdmxcsr(I);
2376 break;
2377 case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
2378 case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
2379 case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
2380 case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
2381 case llvm::Intrinsic::x86_avx512_cvttss2usi64:
2382 case llvm::Intrinsic::x86_avx512_cvttss2usi:
2383 case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
2384 case llvm::Intrinsic::x86_avx512_cvttsd2usi:
2385 case llvm::Intrinsic::x86_avx512_cvtusi2sd:
2386 case llvm::Intrinsic::x86_avx512_cvtusi2ss:
2387 case llvm::Intrinsic::x86_avx512_cvtusi642sd:
2388 case llvm::Intrinsic::x86_avx512_cvtusi642ss:
2389 case llvm::Intrinsic::x86_sse2_cvtsd2si64:
2390 case llvm::Intrinsic::x86_sse2_cvtsd2si:
2391 case llvm::Intrinsic::x86_sse2_cvtsd2ss:
2392 case llvm::Intrinsic::x86_sse2_cvtsi2sd:
2393 case llvm::Intrinsic::x86_sse2_cvtsi642sd:
2394 case llvm::Intrinsic::x86_sse2_cvtss2sd:
2395 case llvm::Intrinsic::x86_sse2_cvttsd2si64:
2396 case llvm::Intrinsic::x86_sse2_cvttsd2si:
2397 case llvm::Intrinsic::x86_sse_cvtsi2ss:
2398 case llvm::Intrinsic::x86_sse_cvtsi642ss:
2399 case llvm::Intrinsic::x86_sse_cvtss2si64:
2400 case llvm::Intrinsic::x86_sse_cvtss2si:
2401 case llvm::Intrinsic::x86_sse_cvttss2si64:
2402 case llvm::Intrinsic::x86_sse_cvttss2si:
2403 handleVectorConvertIntrinsic(I, 1);
2404 break;
2405 case llvm::Intrinsic::x86_sse_cvtps2pi:
2406 case llvm::Intrinsic::x86_sse_cvttps2pi:
2407 handleVectorConvertIntrinsic(I, 2);
2408 break;
2409
2410 case llvm::Intrinsic::x86_avx512_psll_w_512:
2411 case llvm::Intrinsic::x86_avx512_psll_d_512:
2412 case llvm::Intrinsic::x86_avx512_psll_q_512:
2413 case llvm::Intrinsic::x86_avx512_pslli_w_512:
2414 case llvm::Intrinsic::x86_avx512_pslli_d_512:
2415 case llvm::Intrinsic::x86_avx512_pslli_q_512:
2416 case llvm::Intrinsic::x86_avx512_psrl_w_512:
2417 case llvm::Intrinsic::x86_avx512_psrl_d_512:
2418 case llvm::Intrinsic::x86_avx512_psrl_q_512:
2419 case llvm::Intrinsic::x86_avx512_psra_w_512:
2420 case llvm::Intrinsic::x86_avx512_psra_d_512:
2421 case llvm::Intrinsic::x86_avx512_psra_q_512:
2422 case llvm::Intrinsic::x86_avx512_psrli_w_512:
2423 case llvm::Intrinsic::x86_avx512_psrli_d_512:
2424 case llvm::Intrinsic::x86_avx512_psrli_q_512:
2425 case llvm::Intrinsic::x86_avx512_psrai_w_512:
2426 case llvm::Intrinsic::x86_avx512_psrai_d_512:
2427 case llvm::Intrinsic::x86_avx512_psrai_q_512:
2428 case llvm::Intrinsic::x86_avx512_psra_q_256:
2429 case llvm::Intrinsic::x86_avx512_psra_q_128:
2430 case llvm::Intrinsic::x86_avx512_psrai_q_256:
2431 case llvm::Intrinsic::x86_avx512_psrai_q_128:
2432 case llvm::Intrinsic::x86_avx2_psll_w:
2433 case llvm::Intrinsic::x86_avx2_psll_d:
2434 case llvm::Intrinsic::x86_avx2_psll_q:
2435 case llvm::Intrinsic::x86_avx2_pslli_w:
2436 case llvm::Intrinsic::x86_avx2_pslli_d:
2437 case llvm::Intrinsic::x86_avx2_pslli_q:
2438 case llvm::Intrinsic::x86_avx2_psrl_w:
2439 case llvm::Intrinsic::x86_avx2_psrl_d:
2440 case llvm::Intrinsic::x86_avx2_psrl_q:
2441 case llvm::Intrinsic::x86_avx2_psra_w:
2442 case llvm::Intrinsic::x86_avx2_psra_d:
2443 case llvm::Intrinsic::x86_avx2_psrli_w:
2444 case llvm::Intrinsic::x86_avx2_psrli_d:
2445 case llvm::Intrinsic::x86_avx2_psrli_q:
2446 case llvm::Intrinsic::x86_avx2_psrai_w:
2447 case llvm::Intrinsic::x86_avx2_psrai_d:
2448 case llvm::Intrinsic::x86_sse2_psll_w:
2449 case llvm::Intrinsic::x86_sse2_psll_d:
2450 case llvm::Intrinsic::x86_sse2_psll_q:
2451 case llvm::Intrinsic::x86_sse2_pslli_w:
2452 case llvm::Intrinsic::x86_sse2_pslli_d:
2453 case llvm::Intrinsic::x86_sse2_pslli_q:
2454 case llvm::Intrinsic::x86_sse2_psrl_w:
2455 case llvm::Intrinsic::x86_sse2_psrl_d:
2456 case llvm::Intrinsic::x86_sse2_psrl_q:
2457 case llvm::Intrinsic::x86_sse2_psra_w:
2458 case llvm::Intrinsic::x86_sse2_psra_d:
2459 case llvm::Intrinsic::x86_sse2_psrli_w:
2460 case llvm::Intrinsic::x86_sse2_psrli_d:
2461 case llvm::Intrinsic::x86_sse2_psrli_q:
2462 case llvm::Intrinsic::x86_sse2_psrai_w:
2463 case llvm::Intrinsic::x86_sse2_psrai_d:
2464 case llvm::Intrinsic::x86_mmx_psll_w:
2465 case llvm::Intrinsic::x86_mmx_psll_d:
2466 case llvm::Intrinsic::x86_mmx_psll_q:
2467 case llvm::Intrinsic::x86_mmx_pslli_w:
2468 case llvm::Intrinsic::x86_mmx_pslli_d:
2469 case llvm::Intrinsic::x86_mmx_pslli_q:
2470 case llvm::Intrinsic::x86_mmx_psrl_w:
2471 case llvm::Intrinsic::x86_mmx_psrl_d:
2472 case llvm::Intrinsic::x86_mmx_psrl_q:
2473 case llvm::Intrinsic::x86_mmx_psra_w:
2474 case llvm::Intrinsic::x86_mmx_psra_d:
2475 case llvm::Intrinsic::x86_mmx_psrli_w:
2476 case llvm::Intrinsic::x86_mmx_psrli_d:
2477 case llvm::Intrinsic::x86_mmx_psrli_q:
2478 case llvm::Intrinsic::x86_mmx_psrai_w:
2479 case llvm::Intrinsic::x86_mmx_psrai_d:
2480 handleVectorShiftIntrinsic(I, /* Variable */ false);
2481 break;
2482 case llvm::Intrinsic::x86_avx2_psllv_d:
2483 case llvm::Intrinsic::x86_avx2_psllv_d_256:
2484 case llvm::Intrinsic::x86_avx512_psllv_d_512:
2485 case llvm::Intrinsic::x86_avx2_psllv_q:
2486 case llvm::Intrinsic::x86_avx2_psllv_q_256:
2487 case llvm::Intrinsic::x86_avx512_psllv_q_512:
2488 case llvm::Intrinsic::x86_avx2_psrlv_d:
2489 case llvm::Intrinsic::x86_avx2_psrlv_d_256:
2490 case llvm::Intrinsic::x86_avx512_psrlv_d_512:
2491 case llvm::Intrinsic::x86_avx2_psrlv_q:
2492 case llvm::Intrinsic::x86_avx2_psrlv_q_256:
2493 case llvm::Intrinsic::x86_avx512_psrlv_q_512:
2494 case llvm::Intrinsic::x86_avx2_psrav_d:
2495 case llvm::Intrinsic::x86_avx2_psrav_d_256:
2496 case llvm::Intrinsic::x86_avx512_psrav_d_512:
2497 case llvm::Intrinsic::x86_avx512_psrav_q_128:
2498 case llvm::Intrinsic::x86_avx512_psrav_q_256:
2499 case llvm::Intrinsic::x86_avx512_psrav_q_512:
2500 handleVectorShiftIntrinsic(I, /* Variable */ true);
2501 break;
2502
2503 case llvm::Intrinsic::x86_sse2_packsswb_128:
2504 case llvm::Intrinsic::x86_sse2_packssdw_128:
2505 case llvm::Intrinsic::x86_sse2_packuswb_128:
2506 case llvm::Intrinsic::x86_sse41_packusdw:
2507 case llvm::Intrinsic::x86_avx2_packsswb:
2508 case llvm::Intrinsic::x86_avx2_packssdw:
2509 case llvm::Intrinsic::x86_avx2_packuswb:
2510 case llvm::Intrinsic::x86_avx2_packusdw:
2511 handleVectorPackIntrinsic(I);
2512 break;
2513
2514 case llvm::Intrinsic::x86_mmx_packsswb:
2515 case llvm::Intrinsic::x86_mmx_packuswb:
2516 handleVectorPackIntrinsic(I, 16);
2517 break;
2518
2519 case llvm::Intrinsic::x86_mmx_packssdw:
2520 handleVectorPackIntrinsic(I, 32);
2521 break;
2522
2523 case llvm::Intrinsic::x86_mmx_psad_bw:
2524 case llvm::Intrinsic::x86_sse2_psad_bw:
2525 case llvm::Intrinsic::x86_avx2_psad_bw:
2526 handleVectorSadIntrinsic(I);
2527 break;
2528
2529 case llvm::Intrinsic::x86_sse2_pmadd_wd:
2530 case llvm::Intrinsic::x86_avx2_pmadd_wd:
2531 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
2532 case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
2533 handleVectorPmaddIntrinsic(I);
2534 break;
2535
2536 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
2537 handleVectorPmaddIntrinsic(I, 8);
2538 break;
2539
2540 case llvm::Intrinsic::x86_mmx_pmadd_wd:
2541 handleVectorPmaddIntrinsic(I, 16);
2542 break;
2543
2544 case llvm::Intrinsic::x86_sse_cmp_ss:
2545 case llvm::Intrinsic::x86_sse2_cmp_sd:
2546 case llvm::Intrinsic::x86_sse_comieq_ss:
2547 case llvm::Intrinsic::x86_sse_comilt_ss:
2548 case llvm::Intrinsic::x86_sse_comile_ss:
2549 case llvm::Intrinsic::x86_sse_comigt_ss:
2550 case llvm::Intrinsic::x86_sse_comige_ss:
2551 case llvm::Intrinsic::x86_sse_comineq_ss:
2552 case llvm::Intrinsic::x86_sse_ucomieq_ss:
2553 case llvm::Intrinsic::x86_sse_ucomilt_ss:
2554 case llvm::Intrinsic::x86_sse_ucomile_ss:
2555 case llvm::Intrinsic::x86_sse_ucomigt_ss:
2556 case llvm::Intrinsic::x86_sse_ucomige_ss:
2557 case llvm::Intrinsic::x86_sse_ucomineq_ss:
2558 case llvm::Intrinsic::x86_sse2_comieq_sd:
2559 case llvm::Intrinsic::x86_sse2_comilt_sd:
2560 case llvm::Intrinsic::x86_sse2_comile_sd:
2561 case llvm::Intrinsic::x86_sse2_comigt_sd:
2562 case llvm::Intrinsic::x86_sse2_comige_sd:
2563 case llvm::Intrinsic::x86_sse2_comineq_sd:
2564 case llvm::Intrinsic::x86_sse2_ucomieq_sd:
2565 case llvm::Intrinsic::x86_sse2_ucomilt_sd:
2566 case llvm::Intrinsic::x86_sse2_ucomile_sd:
2567 case llvm::Intrinsic::x86_sse2_ucomigt_sd:
2568 case llvm::Intrinsic::x86_sse2_ucomige_sd:
2569 case llvm::Intrinsic::x86_sse2_ucomineq_sd:
2570 handleVectorCompareScalarIntrinsic(I);
2571 break;
2572
2573 case llvm::Intrinsic::x86_sse_cmp_ps:
2574 case llvm::Intrinsic::x86_sse2_cmp_pd:
2575 // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
2576    // generates reasonable-looking IR that fails in the backend with "Do not
2577 // know how to split the result of this operator!".
2578 handleVectorComparePackedIntrinsic(I);
2579 break;
2580
2581 default:
2582 if (!handleUnknownIntrinsic(I))
2583 visitInstruction(I);
2584 break;
2585 }
2586 }
2587
2588 void visitCallSite(CallSite CS) {
2589 Instruction &I = *CS.getInstruction();
2590    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2591 if (CS.isCall()) {
2592 CallInst *Call = cast<CallInst>(&I);
2593
2594 // For inline asm, do the usual thing: check argument shadow and mark all
2595 // outputs as clean. Note that any side effects of the inline asm that are
2596 // not immediately visible in its constraints are not handled.
2597 if (Call->isInlineAsm()) {
2598 visitInstruction(I);
2599 return;
2600 }
2601
2602      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2603
2604 // We are going to insert code that relies on the fact that the callee
2605 // will become a non-readonly function after it is instrumented by us. To
2606 // prevent this code from being optimized out, mark that function
2607 // non-readonly in advance.
2608 if (Function *Func = Call->getCalledFunction()) {
2609 // Clear out readonly/readnone attributes.
2610 AttrBuilder B;
2611 B.addAttribute(Attribute::ReadOnly)
2612 .addAttribute(Attribute::ReadNone);
2613 Func->removeAttributes(AttributeList::FunctionIndex, B);
2614 }
2615
2616 maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
2617 }
2618 IRBuilder<> IRB(&I);
2619
2620 unsigned ArgOffset = 0;
2621    DEBUG(dbgs() << "  CallSite: " << I << "\n");
2622 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2623 ArgIt != End; ++ArgIt) {
2624 Value *A = *ArgIt;
2625 unsigned i = ArgIt - CS.arg_begin();
2626 if (!A->getType()->isSized()) {
2627        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2628 continue;
2629 }
2630 unsigned Size = 0;
2631 Value *Store = nullptr;
2632 // Compute the Shadow for arg even if it is ByVal, because
2633 // in that case getShadow() will copy the actual arg shadow to
2634 // __msan_param_tls.
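      // Illustrative note (not part of the original source): for a call such
      // as f(i32 %a, double %b, i8* %c), the argument shadows land at
      // __msan_param_tls offsets 0, 8 and 16 respectively, because each slot
      // is rounded up to 8 bytes by the alignTo(Size, 8) below.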
2635 Value *ArgShadow = getShadow(A);
2636 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2637      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
2638            "  Shadow: " << *ArgShadow << "\n");
2639 bool ArgIsInitialized = false;
2640 const DataLayout &DL = F.getParent()->getDataLayout();
2641 if (CS.paramHasAttr(i, Attribute::ByVal)) {
2642        assert(A->getType()->isPointerTy() &&
2643               "ByVal argument is not a pointer!");
2644 Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
2645 if (ArgOffset + Size > kParamTLSSize) break;
2646 unsigned ParamAlignment = CS.getParamAlignment(i);
2647 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
2648 Store = IRB.CreateMemCpy(ArgShadowBase,
2649 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
2650 Size, Alignment);
2651 } else {
2652 Size = DL.getTypeAllocSize(A->getType());
2653 if (ArgOffset + Size > kParamTLSSize) break;
2654 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2655 kShadowTLSAlignment);
2656 Constant *Cst = dyn_cast<Constant>(ArgShadow);
2657 if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
2658 }
2659 if (MS.TrackOrigins && !ArgIsInitialized)
2660 IRB.CreateStore(getOrigin(A),
2661 getOriginPtrForArgument(A, IRB, ArgOffset));
2662 (void)Store;
2663      assert(Size != 0 && Store != nullptr);
2664      DEBUG(dbgs() << "  Param:" << *Store << "\n");
2665 ArgOffset += alignTo(Size, 8);
2666 }
2667    DEBUG(dbgs() << "  done with call args\n");
2668
2669 FunctionType *FT =
2670 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2671 if (FT->isVarArg()) {
2672 VAHelper->visitCallSite(CS, IRB);
2673 }
2674
2675 // Now, get the shadow for the RetVal.
2676 if (!I.getType()->isSized()) return;
2677 // Don't emit the epilogue for musttail call returns.
2678 if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
2679 IRBuilder<> IRBBefore(&I);
2680 // Until we have full dynamic coverage, make sure the retval shadow is 0.
2681 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2682 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2683 BasicBlock::iterator NextInsn;
2684 if (CS.isCall()) {
2685 NextInsn = ++I.getIterator();
2686      assert(NextInsn != I.getParent()->end());
2687 } else {
2688 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2689 if (!NormalDest->getSinglePredecessor()) {
2690 // FIXME: this case is tricky, so we are just conservative here.
2691 // Perhaps we need to split the edge between this BB and NormalDest,
2692 // but a naive attempt to use SplitEdge leads to a crash.
2693 setShadow(&I, getCleanShadow(&I));
2694 setOrigin(&I, getCleanOrigin());
2695 return;
2696 }
2697 NextInsn = NormalDest->getFirstInsertionPt();
2698      assert(NextInsn != NormalDest->end() &&
2699             "Could not find insertion point for retval shadow load");
2700 }
2701 IRBuilder<> IRBAfter(&*NextInsn);
2702 Value *RetvalShadow =
2703 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2704 kShadowTLSAlignment, "_msret");
2705 setShadow(&I, RetvalShadow);
2706 if (MS.TrackOrigins)
2707 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2708 }
2709
2710 bool isAMustTailRetVal(Value *RetVal) {
2711 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
2712 RetVal = I->getOperand(0);
2713 }
2714 if (auto *I = dyn_cast<CallInst>(RetVal)) {
2715 return I->isMustTailCall();
2716 }
2717 return false;
2718 }
2719
2720 void visitReturnInst(ReturnInst &I) {
2721 IRBuilder<> IRB(&I);
2722 Value *RetVal = I.getReturnValue();
2723 if (!RetVal) return;
2724 // Don't emit the epilogue for musttail call returns.
2725 if (isAMustTailRetVal(RetVal)) return;
2726 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2727 if (CheckReturnValue) {
2728 insertShadowCheck(RetVal, &I);
2729 Value *Shadow = getCleanShadow(RetVal);
2730 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2731 } else {
2732 Value *Shadow = getShadow(RetVal);
2733 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2734 if (MS.TrackOrigins)
2735 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2736 }
2737 }
2738
2739 void visitPHINode(PHINode &I) {
2740 IRBuilder<> IRB(&I);
2741 if (!PropagateShadow) {
2742 setShadow(&I, getCleanShadow(&I));
2743 setOrigin(&I, getCleanOrigin());
2744 return;
2745 }
2746
2747 ShadowPHINodes.push_back(&I);
2748 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2749 "_msphi_s"));
2750 if (MS.TrackOrigins)
2751 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2752 "_msphi_o"));
2753 }
2754
2755 void visitAllocaInst(AllocaInst &I) {
2756 setShadow(&I, getCleanShadow(&I));
2757 setOrigin(&I, getCleanOrigin());
2758 IRBuilder<> IRB(I.getNextNode());
2759 const DataLayout &DL = F.getParent()->getDataLayout();
2760 uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
2761 Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
2762 if (I.isArrayAllocation())
2763 Len = IRB.CreateMul(Len, I.getArraySize());
2764 if (PoisonStack && ClPoisonStackWithCall) {
2765 IRB.CreateCall(MS.MsanPoisonStackFn,
2766 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
2767 } else {
2768 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2769 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2770 IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlignment());
2771 }
2772
2773 if (PoisonStack && MS.TrackOrigins) {
2774 SmallString<2048> StackDescriptionStorage;
2775 raw_svector_ostream StackDescription(StackDescriptionStorage);
2776 // We create a string with a description of the stack allocation and
2777 // pass it into __msan_set_alloca_origin.
2778 // It will be printed by the run-time if stack-originated UMR is found.
2779 // The first 4 bytes of the string are set to '----' and will be replaced
2780 // by __msan_va_arg_overflow_size_tls at the first call.
2781 StackDescription << "----" << I.getName() << "@" << F.getName();
2782 Value *Descr =
2783 createPrivateNonConstGlobalForString(*F.getParent(),
2784 StackDescription.str());
2785
2786 IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
2787 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
2788 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2789 IRB.CreatePointerCast(&F, MS.IntptrTy)});
2790 }
2791 }
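  // Rough sketch (illustrative only, not part of the original source) of what
  // the code above emits for "%buf = alloca [4 x i8]" when PoisonStack is set
  // and ClPoisonStackWithCall is off:
  //   %shadow = <shadow address computed for %buf>
  //   memset(%shadow, ClPoisonStackPattern, 4)   ; poison the whole shadow
  // With ClPoisonStackWithCall on, the memset is replaced by a call to the
  // MsanPoisonStackFn runtime function with the same pointer and length.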
2792
2793 void visitSelectInst(SelectInst& I) {
2794 IRBuilder<> IRB(&I);
2795 // a = select b, c, d
2796 Value *B = I.getCondition();
2797 Value *C = I.getTrueValue();
2798 Value *D = I.getFalseValue();
2799 Value *Sb = getShadow(B);
2800 Value *Sc = getShadow(C);
2801 Value *Sd = getShadow(D);
2802
2803 // Result shadow if condition shadow is 0.
2804 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2805 Value *Sa1;
2806 if (I.getType()->isAggregateType()) {
2807 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2808 // an extra "select". This results in much more compact IR.
2809 // Sa = select Sb, poisoned, (select b, Sc, Sd)
2810 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2811 } else {
2812 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2813 // If Sb (condition is poisoned), look for bits in c and d that are equal
2814 // and both unpoisoned.
2815 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
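      // Worked example (illustrative only, not part of the original source):
      // for i8 values c = 0x0F, d = 0x0B with clean shadows Sc = Sd = 0, a
      // poisoned condition gives Sa1 = (c ^ d) | Sc | Sd = 0x04, i.e. only the
      // bit where c and d disagree is reported as uninitialized; the other
      // bits have the same value no matter which operand the select picks.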
2816
2817 // Cast arguments to shadow-compatible type.
2818 C = CreateAppToShadowCast(IRB, C);
2819 D = CreateAppToShadowCast(IRB, D);
2820
2821 // Result shadow if condition shadow is 1.
2822 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
2823 }
2824 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
2825 setShadow(&I, Sa);
2826 if (MS.TrackOrigins) {
2827 // Origins are always i32, so any vector conditions must be flattened.
2828 // FIXME: consider tracking vector origins for app vectors?
2829 if (B->getType()->isVectorTy()) {
2830 Type *FlatTy = getShadowTyNoVec(B->getType());
2831 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
2832 ConstantInt::getNullValue(FlatTy));
2833 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
2834 ConstantInt::getNullValue(FlatTy));
2835 }
2836 // a = select b, c, d
2837 // Oa = Sb ? Ob : (b ? Oc : Od)
2838 setOrigin(
2839 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
2840 IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
2841 getOrigin(I.getFalseValue()))));
2842 }
2843 }
2844
2845 void visitLandingPadInst(LandingPadInst &I) {
2846 // Do nothing.
2847 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
2848 setShadow(&I, getCleanShadow(&I));
2849 setOrigin(&I, getCleanOrigin());
2850 }
2851
2852 void visitCatchSwitchInst(CatchSwitchInst &I) {
2853 setShadow(&I, getCleanShadow(&I));
2854 setOrigin(&I, getCleanOrigin());
2855 }
2856
2857 void visitFuncletPadInst(FuncletPadInst &I) {
2858 setShadow(&I, getCleanShadow(&I));
2859 setOrigin(&I, getCleanOrigin());
2860 }
2861
2862 void visitGetElementPtrInst(GetElementPtrInst &I) {
2863 handleShadowOr(I);
2864 }
2865
2866 void visitExtractValueInst(ExtractValueInst &I) {
2867 IRBuilder<> IRB(&I);
2868 Value *Agg = I.getAggregateOperand();
2869    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
2870 Value *AggShadow = getShadow(Agg);
2871    DEBUG(dbgs() << "  AggShadow: " << *AggShadow << "\n");
2872 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2873    DEBUG(dbgs() << "  ResShadow: " << *ResShadow << "\n");
2874 setShadow(&I, ResShadow);
2875 setOriginForNaryOp(I);
2876 }
2877
2878 void visitInsertValueInst(InsertValueInst &I) {
2879 IRBuilder<> IRB(&I);
2880    DEBUG(dbgs() << "InsertValue: " << I << "\n");
2881 Value *AggShadow = getShadow(I.getAggregateOperand());
2882 Value *InsShadow = getShadow(I.getInsertedValueOperand());
2883    DEBUG(dbgs() << "  AggShadow: " << *AggShadow << "\n");
2884    DEBUG(dbgs() << "  InsShadow: " << *InsShadow << "\n");
2885 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2886    DEBUG(dbgs() << "  Res: " << *Res << "\n");
2887 setShadow(&I, Res);
2888 setOriginForNaryOp(I);
2889 }
2890
2891 void dumpInst(Instruction &I) {
2892 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2893 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
2894 } else {
2895 errs() << "ZZZ " << I.getOpcodeName() << "\n";
2896 }
2897 errs() << "QQQ " << I << "\n";
2898 }
2899
2900 void visitResumeInst(ResumeInst &I) {
2901    DEBUG(dbgs() << "Resume: " << I << "\n");
2902 // Nothing to do here.
2903 }
2904
2905 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
2906    DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
2907 // Nothing to do here.
2908 }
2909
2910 void visitCatchReturnInst(CatchReturnInst &CRI) {
2911    DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
2912 // Nothing to do here.
2913 }
2914
2915 void visitInstruction(Instruction &I) {
2916 // Everything else: stop propagating and check for poisoned shadow.
2917 if (ClDumpStrictInstructions)
2918 dumpInst(I);
2919    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
2920 for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
2921 insertShadowCheck(I.getOperand(i), &I);
2922 setShadow(&I, getCleanShadow(&I));
2923 setOrigin(&I, getCleanOrigin());
2924 }
2925};
2926
2927/// \brief AMD64-specific implementation of VarArgHelper.
2928struct VarArgAMD64Helper : public VarArgHelper {
2929 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
2930 // See a comment in visitCallSite for more details.
2931 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
2932 static const unsigned AMD64FpEndOffset = 176;
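  // Illustrative breakdown (not part of the original source): 48 comes from
  // the 6 general-purpose argument registers (rdi, rsi, rdx, rcx, r8, r9) * 8
  // bytes; 176 = 48 + 8 SSE argument registers (xmm0-xmm7) * 16 bytes.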
2933
2934 Function &F;
2935 MemorySanitizer &MS;
2936 MemorySanitizerVisitor &MSV;
2937 Value *VAArgTLSCopy;
2938 Value *VAArgOverflowSize;
2939
2940 SmallVector<CallInst*, 16> VAStartInstrumentationList;
2941
2942 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
2943 MemorySanitizerVisitor &MSV)
2944 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2945 VAArgOverflowSize(nullptr) {}
2946
2947 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
2948
2949 ArgKind classifyArgument(Value* arg) {
2950 // A very rough approximation of X86_64 argument classification rules.
2951 Type *T = arg->getType();
2952 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
2953 return AK_FloatingPoint;
2954 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
2955 return AK_GeneralPurpose;
2956 if (T->isPointerTy())
2957 return AK_GeneralPurpose;
2958 return AK_Memory;
2959 }
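  // Examples (illustrative only, not part of the original source): under this
  // rough approximation an i32 or i8* argument is AK_GeneralPurpose, a double
  // or <4 x float> is AK_FloatingPoint, and an i128 or a struct passed by
  // value is AK_Memory.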
2960
2961 // For VarArg functions, store the argument shadow in an ABI-specific format
2962 // that corresponds to va_list layout.
2963 // We do this because Clang lowers va_arg in the frontend, and this pass
2964 // only sees the low level code that deals with va_list internals.
2965 // A much easier alternative (provided that Clang emits va_arg instructions)
2966 // would have been to associate each live instance of va_list with a copy of
2967 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
2968 // order.
2969 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2970 unsigned GpOffset = 0;
2971 unsigned FpOffset = AMD64GpEndOffset;
2972 unsigned OverflowOffset = AMD64FpEndOffset;
2973 const DataLayout &DL = F.getParent()->getDataLayout();
2974 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2975 ArgIt != End; ++ArgIt) {
2976 Value *A = *ArgIt;
2977 unsigned ArgNo = CS.getArgumentNo(ArgIt);
2978 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
2979 bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
2980 if (IsByVal) {
2981 // ByVal arguments always go to the overflow area.
2982 // Fixed arguments passed through the overflow area will be stepped
2983 // over by va_start, so don't count them towards the offset.
2984 if (IsFixed)
2985 continue;
2986        assert(A->getType()->isPointerTy());
2987 Type *RealTy = A->getType()->getPointerElementType();
2988 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
2989 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2990 OverflowOffset += alignTo(ArgSize, 8);
2991 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
2992 ArgSize, kShadowTLSAlignment);
2993 } else {
2994 ArgKind AK = classifyArgument(A);
2995 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
2996 AK = AK_Memory;
2997 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
2998 AK = AK_Memory;
2999 Value *Base;
3000 switch (AK) {
3001 case AK_GeneralPurpose:
3002 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
3003 GpOffset += 8;
3004 break;
3005 case AK_FloatingPoint:
3006 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
3007 FpOffset += 16;
3008 break;
3009 case AK_Memory:
3010 if (IsFixed)
3011 continue;
3012 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3013 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3014 OverflowOffset += alignTo(ArgSize, 8);
3015 }
3016 // Take fixed arguments into account for GpOffset and FpOffset,
3017 // but don't actually store shadows for them.
3018 if (IsFixed)
3019 continue;
3020 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3021 }
3022 }
3023 Constant *OverflowSize =
3024 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
3025 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3026 }
3027
3028 /// \brief Compute the shadow address for a given va_arg.
3029 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3030 int ArgOffset) {
3031 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3032 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3033 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3034 "_msarg");
3035 }
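  // Rough sketch (illustrative only, not part of the original source) of the
  // IR this helper produces for ArgOffset == 8 on a 64-bit target, assuming
  // the va_arg shadow TLS global is named __msan_va_arg_tls:
  //   %base   = ptrtoint <ty>* @__msan_va_arg_tls to i64
  //   %addr   = add i64 %base, 8
  //   %_msarg = inttoptr i64 %addr to <shadow type>*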
3036
3037 void visitVAStartInst(VAStartInst &I) override {
3038 if (F.getCallingConv() == CallingConv::X86_64_Win64)
3039 return;
3040 IRBuilder<> IRB(&I);
3041 VAStartInstrumentationList.push_back(&I);
3042 Value *VAListTag = I.getArgOperand(0);
3043 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3044
3045 // Unpoison the whole __va_list_tag.
3046 // FIXME: magic ABI constants.
3047 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3048 /* size */24, /* alignment */8, false);
3049 }
3050
3051 void visitVACopyInst(VACopyInst &I) override {
3052 if (F.getCallingConv() == CallingConv::X86_64_Win64)
3053 return;
3054 IRBuilder<> IRB(&I);
3055 Value *VAListTag = I.getArgOperand(0);
3056 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3057
3058 // Unpoison the whole __va_list_tag.
3059 // FIXME: magic ABI constants.
3060 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3061 /* size */24, /* alignment */8, false);
3062 }
3063
3064 void finalizeInstrumentation() override {
3065    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3066           "finalizeInstrumentation called twice");
3067 if (!VAStartInstrumentationList.empty()) {
3068 // If there is a va_start in this function, make a backup copy of
3069 // va_arg_tls somewhere in the function entry block.
3070 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3071 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3072 Value *CopySize =
3073 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
3074 VAArgOverflowSize);
3075 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3076 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3077 }
3078
3079 // Instrument va_start.
3080 // Copy va_list shadow from the backup copy of the TLS contents.
3081 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3082 CallInst *OrigInst = VAStartInstrumentationList[i];
3083 IRBuilder<> IRB(OrigInst->getNextNode());
3084 Value *VAListTag = OrigInst->getArgOperand(0);
3085
3086 Value *RegSaveAreaPtrPtr =
3087 IRB.CreateIntToPtr(
3088 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3089 ConstantInt::get(MS.IntptrTy, 16)),
3090 Type::getInt64PtrTy(*MS.C));
3091 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3092 Value *RegSaveAreaShadowPtr =
3093 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3094 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
3095 AMD64FpEndOffset, 16);
3096
3097 Value *OverflowArgAreaPtrPtr =
3098 IRB.CreateIntToPtr(
3099 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3100 ConstantInt::get(MS.IntptrTy, 8)),
3101 Type::getInt64PtrTy(*MS.C));
3102 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
3103 Value *OverflowArgAreaShadowPtr =
3104 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
3105 Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
3106 AMD64FpEndOffset);
3107 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
3108 }
3109 }
3110};
3111
3112/// \brief MIPS64-specific implementation of VarArgHelper.
3113struct VarArgMIPS64Helper : public VarArgHelper {
3114 Function &F;
3115 MemorySanitizer &MS;
3116 MemorySanitizerVisitor &MSV;
3117 Value *VAArgTLSCopy;
3118 Value *VAArgSize;
3119
3120 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3121
3122 VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
3123 MemorySanitizerVisitor &MSV)
3124 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3125 VAArgSize(nullptr) {}
3126
3127 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3128 unsigned VAArgOffset = 0;
3129 const DataLayout &DL = F.getParent()->getDataLayout();
3130 for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
3131 CS.getFunctionType()->getNumParams(), End = CS.arg_end();
3132 ArgIt != End; ++ArgIt) {
3133 llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
3134 Value *A = *ArgIt;
3135 Value *Base;
3136 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3137 if (TargetTriple.getArch() == llvm::Triple::mips64) {
3138        // Adjust the shadow for arguments with size < 8 to match the
3139        // placement of bits on a big-endian system.
3140 if (ArgSize < 8)
3141 VAArgOffset += (8 - ArgSize);
3142 }
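      // Worked example (illustrative only, not part of the original source):
      // for an i32 vararg (ArgSize == 4) on big-endian mips64, VAArgOffset is
      // bumped by 4 first, so the 4 shadow bytes line up with the half of the
      // 8-byte argument slot that actually holds the value.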
3143 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
3144 VAArgOffset += ArgSize;
3145 VAArgOffset = alignTo(VAArgOffset, 8);
3146 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3147 }
3148
3149 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
3150    // We reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
3151    // class member; it holds the total size of all VarArgs.
3152 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3153 }
3154
3155 /// \brief Compute the shadow address for a given va_arg.
3156 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3157 int ArgOffset) {
3158 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3159 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3160 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3161 "_msarg");
3162 }
3163
3164 void visitVAStartInst(VAStartInst &I) override {
3165 IRBuilder<> IRB(&I);
3166 VAStartInstrumentationList.push_back(&I);
3167 Value *VAListTag = I.getArgOperand(0);
3168 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3169 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3170 /* size */8, /* alignment */8, false);
3171 }
3172
3173 void visitVACopyInst(VACopyInst &I) override {
3174 IRBuilder<> IRB(&I);
3175 Value *VAListTag = I.getArgOperand(0);
3176 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3177 // Unpoison the whole __va_list_tag.
3178 // FIXME: magic ABI constants.
3179 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3180 /* size */8, /* alignment */8, false);
3181 }
3182
3183 void finalizeInstrumentation() override {
3184    assert(!VAArgSize && !VAArgTLSCopy &&
3185           "finalizeInstrumentation called twice");
3186 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3187 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3188 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3189 VAArgSize);
3190
3191 if (!VAStartInstrumentationList.empty()) {
3192 // If there is a va_start in this function, make a backup copy of
3193 // va_arg_tls somewhere in the function entry block.
3194 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3195 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3196 }
3197
3198 // Instrument va_start.
3199 // Copy va_list shadow from the backup copy of the TLS contents.
3200 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3201 CallInst *OrigInst = VAStartInstrumentationList[i];
3202 IRBuilder<> IRB(OrigInst->getNextNode());
3203 Value *VAListTag = OrigInst->getArgOperand(0);
3204 Value *RegSaveAreaPtrPtr =
3205 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3206 Type::getInt64PtrTy(*MS.C));
3207 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3208 Value *RegSaveAreaShadowPtr =
3209 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3210 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3211 }
3212 }
3213};
3214
3215
3216/// \brief AArch64-specific implementation of VarArgHelper.
3217struct VarArgAArch64Helper : public VarArgHelper {
3218 static const unsigned kAArch64GrArgSize = 64;
3219 static const unsigned kAArch64VrArgSize = 128;
3220
3221 static const unsigned AArch64GrBegOffset = 0;
3222 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
3223 // Make VR space aligned to 16 bytes.
3224 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
3225 static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
3226 + kAArch64VrArgSize;
3227 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
3228
3229 Function &F;
3230 MemorySanitizer &MS;
3231 MemorySanitizerVisitor &MSV;
3232 Value *VAArgTLSCopy;
3233 Value *VAArgOverflowSize;
3234
3235 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3236
3237 VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
3238 MemorySanitizerVisitor &MSV)
3239 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3240 VAArgOverflowSize(nullptr) {}
3241
3242 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3243
3244 ArgKind classifyArgument(Value* arg) {
3245 Type *T = arg->getType();
3246 if (T->isFPOrFPVectorTy())
3247 return AK_FloatingPoint;
3248 if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3249 || (T->isPointerTy()))
3250 return AK_GeneralPurpose;
3251 return AK_Memory;
3252 }
3253
3254  // The instrumentation stores the argument shadow in a non-ABI-specific
3255  // format because it does not know which arguments are named (since Clang,
3256  // as in the x86_64 case, lowers va_arg in the frontend and this pass only
3257  // sees the low level code that deals with va_list internals).
3258  // The first seven GR registers are saved in the first 56 bytes of the
3259  // va_arg TLS array, followed by the first 8 FP/SIMD registers, and then
3260  // the remaining arguments.
3261 // Using constant offset within the va_arg TLS array allows fast copy
3262 // in the finalize instrumentation.
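  // Layout sketch (illustrative only, derived from the constants above):
  //   bytes [0, 64)    shadow for GR-register varargs
  //   bytes [64, 192)  shadow for VR-register varargs
  //   bytes [192, ...) shadow for varargs passed on the stack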
3263 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3264 unsigned GrOffset = AArch64GrBegOffset;
3265 unsigned VrOffset = AArch64VrBegOffset;
3266 unsigned OverflowOffset = AArch64VAEndOffset;
3267
3268 const DataLayout &DL = F.getParent()->getDataLayout();
3269 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3270 ArgIt != End; ++ArgIt) {
3271 Value *A = *ArgIt;
3272 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3273 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3274 ArgKind AK = classifyArgument(A);
3275 if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
3276 AK = AK_Memory;
3277 if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
3278 AK = AK_Memory;
3279 Value *Base;
3280 switch (AK) {
3281 case AK_GeneralPurpose:
3282 Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
3283 GrOffset += 8;
3284 break;
3285 case AK_FloatingPoint:
3286 Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
3287 VrOffset += 16;
3288 break;
3289 case AK_Memory:
3290 // Don't count fixed arguments in the overflow area - va_start will
3291 // skip right over them.
3292 if (IsFixed)
3293 continue;
3294 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3295 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3296 OverflowOffset += alignTo(ArgSize, 8);
3297 break;
3298 }
3299 // Count Gp/Vr fixed arguments to their respective offsets, but don't
3300 // bother to actually store a shadow.
3301 if (IsFixed)
3302 continue;
3303 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3304 }
3305 Constant *OverflowSize =
3306 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
3307 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3308 }
3309
3310 /// Compute the shadow address for a given va_arg.
3311 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3312 int ArgOffset) {
3313 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3314 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3315 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3316 "_msarg");
3317 }
3318
3319 void visitVAStartInst(VAStartInst &I) override {
3320 IRBuilder<> IRB(&I);
3321 VAStartInstrumentationList.push_back(&I);
3322 Value *VAListTag = I.getArgOperand(0);
3323 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3324 // Unpoison the whole __va_list_tag.
3325 // FIXME: magic ABI constants (size of va_list).
3326 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3327 /* size */32, /* alignment */8, false);
3328 }
3329
3330 void visitVACopyInst(VACopyInst &I) override {
3331 IRBuilder<> IRB(&I);
3332 Value *VAListTag = I.getArgOperand(0);
3333 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3334 // Unpoison the whole __va_list_tag.
3335 // FIXME: magic ABI constants (size of va_list).
3336 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3337 /* size */32, /* alignment */8, false);
3338 }
3339
3340 // Retrieve a va_list field of 'void*' size.
3341 Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3342 Value *SaveAreaPtrPtr =
3343 IRB.CreateIntToPtr(
3344 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3345 ConstantInt::get(MS.IntptrTy, offset)),
3346 Type::getInt64PtrTy(*MS.C));
3347 return IRB.CreateLoad(SaveAreaPtrPtr);
3348 }
3349
3350 // Retrieve a va_list field of 'int' size.
3351 Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3352 Value *SaveAreaPtr =
3353 IRB.CreateIntToPtr(
3354 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3355 ConstantInt::get(MS.IntptrTy, offset)),
3356 Type::getInt32PtrTy(*MS.C));
3357 Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
3358 return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
3359 }
3360
3361 void finalizeInstrumentation() override {
3362    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3363           "finalizeInstrumentation called twice");
3364 if (!VAStartInstrumentationList.empty()) {
3365 // If there is a va_start in this function, make a backup copy of
3366 // va_arg_tls somewhere in the function entry block.
3367 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3368 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3369 Value *CopySize =
3370 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
3371 VAArgOverflowSize);
3372 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3373 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3374 }
3375
3376 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
3377 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
3378
3379 // Instrument va_start, copy va_list shadow from the backup copy of
3380 // the TLS contents.
3381 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3382 CallInst *OrigInst = VAStartInstrumentationList[i];
3383 IRBuilder<> IRB(OrigInst->getNextNode());
3384
3385 Value *VAListTag = OrigInst->getArgOperand(0);
3386
3387 // The variadic ABI for AArch64 creates two areas to save the incoming
3388 // argument registers (one for 64-bit general register xn-x7 and another
3389 // for 128-bit FP/SIMD vn-v7).
3390 // We need then to propagate the shadow arguments on both regions
3391 // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
3392      // The remaining arguments are saved in the shadow for 'va::stack'.
3393      // One caveat is that only the non-named arguments need to be
3394      // propagated, but the call site instrumentation saves 'all' the
3395      // arguments. So to copy the shadow values from the va_arg TLS array
3396      // we need to adjust the offset for both GR and VR fields based on
3397      // the __{gr,vr}_offs value (since those offsets are set based on the
3398      // incoming named arguments).
3399
3400 // Read the stack pointer from the va_list.
3401 Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
3402
3403 // Read both the __gr_top and __gr_off and add them up.
3404 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
3405 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
3406
3407 Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
3408
3409 // Read both the __vr_top and __vr_off and add them up.
3410 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
3411 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
3412
3413 Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
3414
3415      // The instrumentation does not know how many named arguments are used
3416      // and, at the call site, all the arguments were saved. Since __gr_off
3417      // is defined as '0 - ((8 - named_gr) * 8)', the idea is to propagate
3418      // only the variadic arguments by skipping the named arguments' shadow.
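      // Worked example (illustrative only, not part of the original source):
      // with 2 named GR arguments, __gr_off = 0 - ((8 - 2) * 8) = -48, so
      // GrRegSaveAreaShadowPtrOff = 64 + (-48) = 16 and GrCopySize = 64 - 16 =
      // 48, i.e. we skip the 16 bytes of shadow belonging to the named
      // arguments and copy only the six variadic register slots.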
3419 Value *GrRegSaveAreaShadowPtrOff =
3420 IRB.CreateAdd(GrArgSize, GrOffSaveArea);
3421
3422 Value *GrRegSaveAreaShadowPtr =
3423 MSV.getShadowPtr(GrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3424
3425 Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3426 GrRegSaveAreaShadowPtrOff);
3427 Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
3428
3429 IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8);
3430
3431 // Again, but for FP/SIMD values.
3432 Value *VrRegSaveAreaShadowPtrOff =
3433 IRB.CreateAdd(VrArgSize, VrOffSaveArea);
3434
3435 Value *VrRegSaveAreaShadowPtr =
3436 MSV.getShadowPtr(VrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3437
3438 Value *VrSrcPtr = IRB.CreateInBoundsGEP(
3439 IRB.getInt8Ty(),
3440 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3441 IRB.getInt32(AArch64VrBegOffset)),
3442 VrRegSaveAreaShadowPtrOff);
3443 Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
3444
3445 IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8);
3446
3447 // And finally for remaining arguments.
3448 Value *StackSaveAreaShadowPtr =
3449 MSV.getShadowPtr(StackSaveAreaPtr, IRB.getInt8Ty(), IRB);
3450
3451 Value *StackSrcPtr =
3452 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3453 IRB.getInt32(AArch64VAEndOffset));
3454
3455 IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr,
3456 VAArgOverflowSize, 16);
3457 }
3458 }
3459};
3460
3461/// \brief PowerPC64-specific implementation of VarArgHelper.
3462struct VarArgPowerPC64Helper : public VarArgHelper {
3463 Function &F;
3464 MemorySanitizer &MS;
3465 MemorySanitizerVisitor &MSV;
3466 Value *VAArgTLSCopy;
3467 Value *VAArgSize;
3468
3469 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3470
3471 VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
3472 MemorySanitizerVisitor &MSV)
3473 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3474 VAArgSize(nullptr) {}
3475
3476 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3477 // For PowerPC, we need to deal with alignment of stack arguments -
3478 // they are mostly aligned to 8 bytes, but vectors and i128 arrays
3479 // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
3480 // and QPX vectors are aligned to 32 bytes. For that reason, we
3481    // compute the current offset from the stack pointer (which is always
3482    // properly aligned) and the offset of the first vararg, then subtract them.
3483 unsigned VAArgBase;
3484 llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
3485 // Parameter save area starts at 48 bytes from frame pointer for ABIv1,
3486 // and 32 bytes for ABIv2. This is usually determined by target
3487    // endianness, but in theory could be overridden by a function attribute.
3488 // For simplicity, we ignore it here (it'd only matter for QPX vectors).
3489 if (TargetTriple.getArch() == llvm::Triple::ppc64)
3490 VAArgBase = 48;
3491 else
3492 VAArgBase = 32;
3493 unsigned VAArgOffset = VAArgBase;
3494 const DataLayout &DL = F.getParent()->getDataLayout();
3495 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3496 ArgIt != End; ++ArgIt) {
3497 Value *A = *ArgIt;
3498 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3499 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3500 bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
3501 if (IsByVal) {
3502        assert(A->getType()->isPointerTy());
3503 Type *RealTy = A->getType()->getPointerElementType();
3504 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3505 uint64_t ArgAlign = CS.getParamAlignment(ArgNo);
3506 if (ArgAlign < 8)
3507 ArgAlign = 8;
3508 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3509 if (!IsFixed) {
3510 Value *Base = getShadowPtrForVAArgument(RealTy, IRB,
3511 VAArgOffset - VAArgBase);
3512 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
3513 ArgSize, kShadowTLSAlignment);
3514 }
3515 VAArgOffset += alignTo(ArgSize, 8);
3516 } else {
3517 Value *Base;
3518 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3519 uint64_t ArgAlign = 8;
3520 if (A->getType()->isArrayTy()) {
3521 // Arrays are aligned to element size, except for long double
3522 // arrays, which are aligned to 8 bytes.
3523 Type *ElementTy = A->getType()->getArrayElementType();
3524 if (!ElementTy->isPPC_FP128Ty())
3525 ArgAlign = DL.getTypeAllocSize(ElementTy);
3526 } else if (A->getType()->isVectorTy()) {
3527 // Vectors are naturally aligned.
3528 ArgAlign = DL.getTypeAllocSize(A->getType());
3529 }
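        // Illustrative examples (not part of the original source): a <4 x i32>
        // vararg gets ArgAlign = 16; a [4 x i32] array gets ArgAlign = 4, which
        // is raised to 8 just below; an array of ppc_fp128 keeps the default
        // ArgAlign = 8.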
3530 if (ArgAlign < 8)
3531 ArgAlign = 8;
3532 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3533 if (DL.isBigEndian()) {
3534 // Adjusting the shadow for argument with size < 8 to match the placement
3535 // of bits in big endian system
3536 if (ArgSize < 8)
3537 VAArgOffset += (8 - ArgSize);
3538 }
3539 if (!IsFixed) {
3540 Base = getShadowPtrForVAArgument(A->getType(), IRB,
3541 VAArgOffset - VAArgBase);
3542 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3543 }
3544 VAArgOffset += ArgSize;
3545 VAArgOffset = alignTo(VAArgOffset, 8);
3546 }
3547 if (IsFixed)
3548 VAArgBase = VAArgOffset;
3549 }
3550
3551 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
3552 VAArgOffset - VAArgBase);
3553    // We reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
3554    // class member; it holds the total size of all VarArgs.
3555 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3556 }
3557
3558 /// \brief Compute the shadow address for a given va_arg.
3559 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3560 int ArgOffset) {
3561 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3562 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3563 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3564 "_msarg");
3565 }
3566
3567 void visitVAStartInst(VAStartInst &I) override {
3568 IRBuilder<> IRB(&I);
3569 VAStartInstrumentationList.push_back(&I);
3570 Value *VAListTag = I.getArgOperand(0);
3571 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3572 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3573 /* size */8, /* alignment */8, false);
3574 }
3575
3576 void visitVACopyInst(VACopyInst &I) override {
3577 IRBuilder<> IRB(&I);
3578 Value *VAListTag = I.getArgOperand(0);
3579 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3580 // Unpoison the whole __va_list_tag.
3581 // FIXME: magic ABI constants.
3582 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3583 /* size */8, /* alignment */8, false);
3584 }
3585
3586 void finalizeInstrumentation() override {
3587    assert(!VAArgSize && !VAArgTLSCopy &&
3588           "finalizeInstrumentation called twice");
3589 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3590 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3591 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3592 VAArgSize);
3593
3594 if (!VAStartInstrumentationList.empty()) {
3595 // If there is a va_start in this function, make a backup copy of
3596 // va_arg_tls somewhere in the function entry block.
3597 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3598 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3599 }
3600
3601 // Instrument va_start.
3602 // Copy va_list shadow from the backup copy of the TLS contents.
3603 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3604 CallInst *OrigInst = VAStartInstrumentationList[i];
3605 IRBuilder<> IRB(OrigInst->getNextNode());
3606 Value *VAListTag = OrigInst->getArgOperand(0);
3607 Value *RegSaveAreaPtrPtr =
3608 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3609 Type::getInt64PtrTy(*MS.C));
3610 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3611 Value *RegSaveAreaShadowPtr =
3612 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3613 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3614 }
3615 }
3616};
3617
3618/// \brief A no-op implementation of VarArgHelper.
3619struct VarArgNoOpHelper : public VarArgHelper {
3620 VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
3621 MemorySanitizerVisitor &MSV) {}
3622
3623 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
3624
3625 void visitVAStartInst(VAStartInst &I) override {}
3626
3627 void visitVACopyInst(VACopyInst &I) override {}
3628
3629 void finalizeInstrumentation() override {}
3630};
3631
3632VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
3633 MemorySanitizerVisitor &Visitor) {
3634 // VarArg handling is only implemented on AMD64. False positives are possible
3635 // on other platforms.
3636 llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
3637 if (TargetTriple.getArch() == llvm::Triple::x86_64)
3638 return new VarArgAMD64Helper(Func, Msan, Visitor);
3639 else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
3640 TargetTriple.getArch() == llvm::Triple::mips64el)
3641 return new VarArgMIPS64Helper(Func, Msan, Visitor);
3642 else if (TargetTriple.getArch() == llvm::Triple::aarch64)
3643 return new VarArgAArch64Helper(Func, Msan, Visitor);
3644 else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
3645 TargetTriple.getArch() == llvm::Triple::ppc64le)
3646 return new VarArgPowerPC64Helper(Func, Msan, Visitor);
3647 else
3648 return new VarArgNoOpHelper(Func, Msan, Visitor);
3649}
3650
3651} // anonymous namespace
3652
3653bool MemorySanitizer::runOnFunction(Function &F) {
3654 if (&F == MsanCtorFunction)
3655 return false;
3656 MemorySanitizerVisitor Visitor(F, *this);
3657
3658 // Clear out readonly/readnone attributes.
3659 AttrBuilder B;
3660 B.addAttribute(Attribute::ReadOnly)
3661 .addAttribute(Attribute::ReadNone);
3662 F.removeAttributes(AttributeList::FunctionIndex, B);
3663
3664 return Visitor.runOnFunction();
3665}