Bug Summary

File: lib/Transforms/Instrumentation/MemorySanitizer.cpp
Warning: line 2370, column 46
Division by zero

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MemorySanitizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Transforms/Instrumentation -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Instrumentation -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Transforms/Instrumentation -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Instrumentation/MemorySanitizer.cpp
1//===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// This file is a part of MemorySanitizer, a detector of uninitialized
12/// reads.
13///
14/// The algorithm of the tool is similar to Memcheck
15/// (http://goo.gl/QKbem). We associate a few shadow bits with every
16/// byte of the application memory, poison the shadow of the malloc-ed
17/// or alloca-ed memory, load the shadow bits on every memory read,
18/// propagate the shadow bits through some of the arithmetic
19/// instructions (including MOV), store the shadow bits on every memory
20/// write, report a bug on some other instructions (e.g. JMP) if the
21/// associated shadow is poisoned.
22///
23/// But there are differences too. The first and major one:
24/// compiler instrumentation instead of binary instrumentation. This
25/// gives us much better register allocation, possible compiler
26/// optimizations and a fast start-up. But this brings the major issue
27/// as well: msan needs to see all program events, including system
28/// calls and reads/writes in system libraries, so we either need to
29/// compile *everything* with msan or use a binary translation
30/// component (e.g. DynamoRIO) to instrument pre-built libraries.
31/// Another difference from Memcheck is that we use 8 shadow bits per
32/// byte of application memory and use a direct shadow mapping. This
33/// greatly simplifies the instrumentation code and avoids races on
34/// shadow updates (Memcheck is single-threaded so races are not a
35/// concern there. Memcheck uses 2 shadow bits per byte with slow-path
36/// storage that uses 8 bits per byte).
37///
38/// The default value of shadow is 0, which means "clean" (not poisoned).
39///
40/// Every module initializer should call __msan_init to ensure that the
41/// shadow memory is ready. On error, __msan_warning is called. Since
42/// parameters and return values may be passed via registers, we have a
43/// specialized thread-local shadow for return values
44/// (__msan_retval_tls) and parameters (__msan_param_tls).
45///
46/// Origin tracking.
47///
48/// MemorySanitizer can track origins (allocation points) of all uninitialized
49/// values. This behavior is controlled with a flag (msan-track-origins) and is
50/// disabled by default.
51///
52/// Origins are 4-byte values created and interpreted by the runtime library.
53/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
54/// of application memory. Propagation of origins is basically a bunch of
55/// "select" instructions that pick the origin of a dirty argument, if an
56/// instruction has one.
57///
58/// Every 4 aligned, consecutive bytes of application memory have one origin
59/// value associated with them. If these bytes contain uninitialized data
60/// coming from 2 different allocations, the last store wins. Because of this,
61/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
62/// practice.
63///
64/// Origins are meaningless for fully initialized values, so MemorySanitizer
65/// avoids storing origin to memory when a fully initialized value is stored.
66/// This way it avoids needlessly overwriting the origin of the 4-byte region
67/// on a short (e.g. 1-byte) clean store, which is also good for performance.
68///
69/// Atomic handling.
70///
71/// Ideally, every atomic store of an application value should update the
72/// corresponding shadow location in an atomic way. Unfortunately, an atomic
73/// store to two disjoint locations cannot be done without severe slowdown.
74///
75/// Therefore, we implement an approximation that may err on the safe side.
76/// In this implementation, every atomically accessed location in the program
77/// may only change from (partially) uninitialized to fully initialized, but
78/// not the other way around. We load the shadow _after_ the application load,
79/// and we store the shadow _before_ the app store. Also, we always store clean
80/// shadow (if the application store is atomic). This way, if the store-load
81/// pair constitutes a happens-before arc, shadow store and load are correctly
82/// ordered such that the load will get either the value that was stored, or
83/// some later value (which is always clean).
84///
85/// This does not work very well with Compare-And-Swap (CAS) and
86/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
87/// must store the new shadow before the app operation, and load the shadow
88/// after the app operation. Computers don't work this way. Current
89/// implementation ignores the load aspect of CAS/RMW, always returning a clean
90/// value. It implements the store part as a simple atomic store by storing a
91/// clean shadow.
92//
93//===----------------------------------------------------------------------===//
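
// Illustrative aside (not part of MemorySanitizer.cpp): a minimal standalone
// model of the shadow semantics described above. All names here are invented
// for the example; MSan's real shadow lives in a parallel memory mapping,
// not next to the value.
#include <cstdint>
#include <cstdio>

struct Shadowed {
  uint32_t Val;
  uint32_t Shadow; // bit set == that bit of Val is uninitialized
};

// OR-ing the operand shadows is a conservative propagation rule.
static Shadowed addShadowed(Shadowed A, Shadowed B) {
  return {A.Val + B.Val, A.Shadow | B.Shadow};
}

int main() {
  Shadowed Init = {42, 0};          // clean shadow: fully initialized
  Shadowed Undef = {0, 0xffffffff}; // poisoned, like fresh malloc-ed memory
  Shadowed Sum = addShadowed(Init, Undef);
  if (Sum.Shadow)                   // branching on Sum would be reported
    std::puts("__msan_warning: use of uninitialized value");
}
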
94
95#include "llvm/ADT/APInt.h"
96#include "llvm/ADT/ArrayRef.h"
97#include "llvm/ADT/DepthFirstIterator.h"
98#include "llvm/ADT/SmallString.h"
99#include "llvm/ADT/SmallVector.h"
100#include "llvm/ADT/StringExtras.h"
101#include "llvm/ADT/StringRef.h"
102#include "llvm/ADT/Triple.h"
103#include "llvm/Analysis/TargetLibraryInfo.h"
104#include "llvm/Analysis/Utils/Local.h"
105#include "llvm/IR/Argument.h"
106#include "llvm/IR/Attributes.h"
107#include "llvm/IR/BasicBlock.h"
108#include "llvm/IR/CallSite.h"
109#include "llvm/IR/CallingConv.h"
110#include "llvm/IR/Constant.h"
111#include "llvm/IR/Constants.h"
112#include "llvm/IR/DataLayout.h"
113#include "llvm/IR/DerivedTypes.h"
114#include "llvm/IR/Function.h"
115#include "llvm/IR/GlobalValue.h"
116#include "llvm/IR/GlobalVariable.h"
117#include "llvm/IR/IRBuilder.h"
118#include "llvm/IR/InlineAsm.h"
119#include "llvm/IR/InstVisitor.h"
120#include "llvm/IR/InstrTypes.h"
121#include "llvm/IR/Instruction.h"
122#include "llvm/IR/Instructions.h"
123#include "llvm/IR/IntrinsicInst.h"
124#include "llvm/IR/Intrinsics.h"
125#include "llvm/IR/LLVMContext.h"
126#include "llvm/IR/MDBuilder.h"
127#include "llvm/IR/Module.h"
128#include "llvm/IR/Type.h"
129#include "llvm/IR/Value.h"
130#include "llvm/IR/ValueMap.h"
131#include "llvm/Pass.h"
132#include "llvm/Support/AtomicOrdering.h"
133#include "llvm/Support/Casting.h"
134#include "llvm/Support/CommandLine.h"
135#include "llvm/Support/Compiler.h"
136#include "llvm/Support/Debug.h"
137#include "llvm/Support/ErrorHandling.h"
138#include "llvm/Support/MathExtras.h"
139#include "llvm/Support/raw_ostream.h"
140#include "llvm/Transforms/Instrumentation.h"
141#include "llvm/Transforms/Utils/BasicBlockUtils.h"
142#include "llvm/Transforms/Utils/ModuleUtils.h"
143#include <algorithm>
144#include <cassert>
145#include <cstddef>
146#include <cstdint>
147#include <memory>
148#include <string>
149#include <tuple>
150
151using namespace llvm;
152
153#define DEBUG_TYPE "msan"
154
155static const unsigned kOriginSize = 4;
156static const unsigned kMinOriginAlignment = 4;
157static const unsigned kShadowTLSAlignment = 8;
158
159// These constants must be kept in sync with the ones in msan.h.
160static const unsigned kParamTLSSize = 800;
161static const unsigned kRetvalTLSSize = 800;
162
163// Access sizes are powers of two: 1, 2, 4, 8.
164static const size_t kNumberOfAccessSizes = 4;
165
166/// \brief Track origins of uninitialized values.
167///
168/// Adds a section to MemorySanitizer report that points to the allocation
169/// (stack or heap) the uninitialized bits came from originally.
170static cl::opt<int> ClTrackOrigins("msan-track-origins",
171 cl::desc("Track origins (allocation sites) of poisoned memory"),
172 cl::Hidden, cl::init(0));
173
174static cl::opt<bool> ClKeepGoing("msan-keep-going",
175 cl::desc("keep going after reporting a UMR"),
176 cl::Hidden, cl::init(false));
177
178static cl::opt<bool> ClPoisonStack("msan-poison-stack",
179 cl::desc("poison uninitialized stack variables"),
180 cl::Hidden, cl::init(true));
181
182static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
183 cl::desc("poison uninitialized stack variables with a call"),
184 cl::Hidden, cl::init(false));
185
186static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
187 cl::desc("poison uninitialized stack variables with the given pattern"),
188 cl::Hidden, cl::init(0xff));
189
190static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
191 cl::desc("poison undef temps"),
192 cl::Hidden, cl::init(true));
193
194static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
195 cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
196 cl::Hidden, cl::init(true));
197
198static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
199 cl::desc("exact handling of relational integer ICmp"),
200 cl::Hidden, cl::init(false));
201
202// When compiling the Linux kernel, we sometimes see false positives related to
203// MSan being unable to understand that inline assembly calls may initialize
204// local variables.
205// This flag makes the compiler conservatively unpoison every memory location
206// passed into an assembly call. Note that this may cause false negatives.
207// Because it's impossible to figure out the array sizes, we can only unpoison
208// the first sizeof(type) bytes for each type* pointer.
209static cl::opt<bool> ClHandleAsmConservative(
210 "msan-handle-asm-conservative",
211 cl::desc("conservative handling of inline assembly"), cl::Hidden,
212 cl::init(false));
213
214// This flag controls whether we check the shadow of the address
215// operand of a load or store. Such bugs are very rare, since a load from
216// a garbage address typically results in SEGV, but they still happen
217// (e.g. only the lower bits of the address are garbage, or the access happens
218// early at program startup where malloc-ed memory is more likely to
219// be zeroed). As of 2012-08-28 this flag adds a 20% slowdown.
220static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
221 cl::desc("report accesses through a pointer which has poisoned shadow"),
222 cl::Hidden, cl::init(true));
223
224static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
225 cl::desc("print out instructions with default strict semantics"),
226 cl::Hidden, cl::init(false));
227
228static cl::opt<int> ClInstrumentationWithCallThreshold(
229 "msan-instrumentation-with-call-threshold",
230 cl::desc(
231 "If the function being instrumented requires more than "
232 "this number of checks and origin stores, use callbacks instead of "
233 "inline checks (-1 means never use callbacks)."),
234 cl::Hidden, cl::init(3500));
235
236// This is an experiment to enable handling of cases where shadow is a non-zero
237// compile-time constant. For some inexplicable reason such shadows were
238// silently ignored in the instrumentation.
239static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
240 cl::desc("Insert checks for constant shadow values"),
241 cl::Hidden, cl::init(false));
242
243// This is off by default because of a bug in gold:
244// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
245static cl::opt<bool> ClWithComdat("msan-with-comdat",
246 cl::desc("Place MSan constructors in comdat sections"),
247 cl::Hidden, cl::init(false));
248
249// These options allow specifying custom memory map parameters.
250// See MemoryMapParams for details.
251static cl::opt<unsigned long long> ClAndMask("msan-and-mask",
252 cl::desc("Define custom MSan AndMask"),
253 cl::Hidden, cl::init(0));
254
255static cl::opt<unsigned long long> ClXorMask("msan-xor-mask",
256 cl::desc("Define custom MSan XorMask"),
257 cl::Hidden, cl::init(0));
258
259static cl::opt<unsigned long long> ClShadowBase("msan-shadow-base",
260 cl::desc("Define custom MSan ShadowBase"),
261 cl::Hidden, cl::init(0));
262
263static cl::opt<unsigned long long> ClOriginBase("msan-origin-base",
264 cl::desc("Define custom MSan OriginBase"),
265 cl::Hidden, cl::init(0));
266
267static const char *const kMsanModuleCtorName = "msan.module_ctor";
268static const char *const kMsanInitName = "__msan_init";
269
270namespace {
271
272// Memory map parameters used in application-to-shadow address calculation.
273// Offset = (Addr & ~AndMask) ^ XorMask
274// Shadow = ShadowBase + Offset
275// Origin = OriginBase + Offset
276struct MemoryMapParams {
277 uint64_t AndMask;
278 uint64_t XorMask;
279 uint64_t ShadowBase;
280 uint64_t OriginBase;
281};
282
283struct PlatformMemoryMapParams {
284 const MemoryMapParams *bits32;
285 const MemoryMapParams *bits64;
286};
287
288} // end anonymous namespace
289
290// i386 Linux
291static const MemoryMapParams Linux_I386_MemoryMapParams = {
292 0x000080000000, // AndMask
293 0, // XorMask (not used)
294 0, // ShadowBase (not used)
295 0x000040000000, // OriginBase
296};
297
298// x86_64 Linux
299static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
300#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
301 0x400000000000, // AndMask
302 0, // XorMask (not used)
303 0, // ShadowBase (not used)
304 0x200000000000, // OriginBase
305#else
306 0, // AndMask (not used)
307 0x500000000000, // XorMask
308 0, // ShadowBase (not used)
309 0x100000000000, // OriginBase
310#endif
311};
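
// Illustrative aside (not part of MemorySanitizer.cpp): a worked example of
// the mapping formula from the MemoryMapParams comment, using the default
// x86_64 Linux parameters above. The sample address is arbitrary; the
// & ~3ULL mirrors the 4-byte origin alignment applied later in
// getShadowOriginPtrUserspace.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t AndMask = 0, XorMask = 0x500000000000;
  const uint64_t ShadowBase = 0, OriginBase = 0x100000000000;
  uint64_t Addr = 0x7ffe12345678;                  // sample app address
  uint64_t Offset = (Addr & ~AndMask) ^ XorMask;   // here just Addr ^ XorMask
  uint64_t Shadow = ShadowBase + Offset;
  uint64_t Origin = (OriginBase + Offset) & ~3ULL; // keep 4-byte aligned
  std::printf("shadow=%#llx origin=%#llx\n",
              (unsigned long long)Shadow, (unsigned long long)Origin);
}
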
312
313// mips64 Linux
314static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
315 0, // AndMask (not used)
316 0x008000000000, // XorMask
317 0, // ShadowBase (not used)
318 0x002000000000, // OriginBase
319};
320
321// ppc64 Linux
322static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
323 0xE00000000000, // AndMask
324 0x100000000000, // XorMask
325 0x080000000000, // ShadowBase
326 0x1C0000000000, // OriginBase
327};
328
329// aarch64 Linux
330static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
331 0, // AndMask (not used)
332 0x06000000000, // XorMask
333 0, // ShadowBase (not used)
334 0x01000000000, // OriginBase
335};
336
337// i386 FreeBSD
338static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
339 0x000180000000, // AndMask
340 0x000040000000, // XorMask
341 0x000020000000, // ShadowBase
342 0x000700000000, // OriginBase
343};
344
345// x86_64 FreeBSD
346static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
347 0xc00000000000, // AndMask
348 0x200000000000, // XorMask
349 0x100000000000, // ShadowBase
350 0x380000000000, // OriginBase
351};
352
353// x86_64 NetBSD
354static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
355 0, // AndMask
356 0x500000000000, // XorMask
357 0, // ShadowBase
358 0x100000000000, // OriginBase
359};
360
361static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
362 &Linux_I386_MemoryMapParams,
363 &Linux_X86_64_MemoryMapParams,
364};
365
366static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
367 nullptr,
368 &Linux_MIPS64_MemoryMapParams,
369};
370
371static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
372 nullptr,
373 &Linux_PowerPC64_MemoryMapParams,
374};
375
376static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
377 nullptr,
378 &Linux_AArch64_MemoryMapParams,
379};
380
381static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
382 &FreeBSD_I386_MemoryMapParams,
383 &FreeBSD_X86_64_MemoryMapParams,
384};
385
386static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
387 nullptr,
388 &NetBSD_X86_64_MemoryMapParams,
389};
390
391namespace {
392
393/// \brief An instrumentation pass implementing detection of uninitialized
394/// reads.
395///
396/// MemorySanitizer: instrument the code in module to find
397/// uninitialized reads.
398class MemorySanitizer : public FunctionPass {
399public:
400 // Pass identification, replacement for typeid.
401 static char ID;
402
403 MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
404 : FunctionPass(ID),
405 TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
406 Recover(Recover || ClKeepGoing) {}
407
408 StringRef getPassName() const override { return "MemorySanitizer"; }
409
410 void getAnalysisUsage(AnalysisUsage &AU) const override {
411 AU.addRequired<TargetLibraryInfoWrapperPass>();
412 }
413
414 bool runOnFunction(Function &F) override;
415 bool doInitialization(Module &M) override;
416
417private:
418 friend struct MemorySanitizerVisitor;
419 friend struct VarArgAMD64Helper;
420 friend struct VarArgMIPS64Helper;
421 friend struct VarArgAArch64Helper;
422 friend struct VarArgPowerPC64Helper;
423
424 void initializeCallbacks(Module &M);
425
426 /// \brief Track origins (allocation points) of uninitialized values.
427 int TrackOrigins;
428 bool Recover;
429
430 LLVMContext *C;
431 Type *IntptrTy;
432 Type *OriginTy;
433
434 /// \brief Thread-local shadow storage for function parameters.
435 GlobalVariable *ParamTLS;
436
437 /// \brief Thread-local origin storage for function parameters.
438 GlobalVariable *ParamOriginTLS;
439
440 /// \brief Thread-local shadow storage for function return value.
441 GlobalVariable *RetvalTLS;
442
443 /// \brief Thread-local origin storage for function return value.
444 GlobalVariable *RetvalOriginTLS;
445
446 /// \brief Thread-local shadow storage for in-register va_arg function
447 /// parameters (x86_64-specific).
448 GlobalVariable *VAArgTLS;
449
450 /// \brief Thread-local shadow storage for va_arg overflow area
451 /// (x86_64-specific).
452 GlobalVariable *VAArgOverflowSizeTLS;
453
454 /// \brief Thread-local space used to pass origin value to the UMR reporting
455 /// function.
456 GlobalVariable *OriginTLS;
457
458 /// \brief The run-time callback to print a warning.
459 Value *WarningFn = nullptr;
460
461 // These arrays are indexed by log2(AccessSize).
462 Value *MaybeWarningFn[kNumberOfAccessSizes];
463 Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
464
465 /// \brief Run-time helper that generates a new origin value for a stack
466 /// allocation.
467 Value *MsanSetAllocaOrigin4Fn;
468
469 /// \brief Run-time helper that poisons stack on function entry.
470 Value *MsanPoisonStackFn;
471
472 /// \brief Run-time helper that records a store (or any event) of an
473 /// uninitialized value and returns an updated origin id encoding this info.
474 Value *MsanChainOriginFn;
475
476 /// \brief MSan runtime replacements for memmove, memcpy and memset.
477 Value *MemmoveFn, *MemcpyFn, *MemsetFn;
478
479 /// \brief Memory map parameters used in application-to-shadow calculation.
480 const MemoryMapParams *MapParams;
481
482 /// \brief Custom memory map parameters used when -msan-shadow-base or
483 /// -msan-origin-base is provided.
484 MemoryMapParams CustomMapParams;
485
486 MDNode *ColdCallWeights;
487
488 /// \brief Branch weights for origin store.
489 MDNode *OriginStoreWeights;
490
491 /// \brief An empty volatile inline asm that prevents callback merge.
492 InlineAsm *EmptyAsm;
493
494 Function *MsanCtorFunction;
495};
496
497} // end anonymous namespace
498
499char MemorySanitizer::ID = 0;
500
501INITIALIZE_PASS_BEGIN(
502 MemorySanitizer, "msan",
503 "MemorySanitizer: detects uninitialized reads.", false, false)
504INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
505INITIALIZE_PASS_END(
506 MemorySanitizer, "msan",
507 "MemorySanitizer: detects uninitialized reads.", false, false)
508
509FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) {
510 return new MemorySanitizer(TrackOrigins, Recover);
511}
512
513/// \brief Create a non-const global initialized with the given string.
514///
515/// Creates a writable global for Str so that we can pass it to the
516/// run-time lib. The runtime uses the first 4 bytes of the string to store
517/// the frame ID, so the string needs to be mutable.
518static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
519 StringRef Str) {
520 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
521 return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
522 GlobalValue::PrivateLinkage, StrConst, "");
523}
524
525/// \brief Insert extern declaration of runtime-provided functions and globals.
526void MemorySanitizer::initializeCallbacks(Module &M) {
527 // Only do this once.
528 if (WarningFn)
529 return;
530
531 IRBuilder<> IRB(*C);
532 // Create the callback.
533 // FIXME: this function should have "Cold" calling conv,
534 // which is not yet implemented.
535 StringRef WarningFnName = Recover ? "__msan_warning"
536 : "__msan_warning_noreturn";
537 WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
538
539 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
540 AccessSizeIndex++) {
541 unsigned AccessSize = 1 << AccessSizeIndex;
542 std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
543 MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
544 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
545 IRB.getInt32Ty());
546
547 FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
548 MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
549 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
550 IRB.getInt8PtrTy(), IRB.getInt32Ty());
551 }
552
553 MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
554 "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
555 IRB.getInt8PtrTy(), IntptrTy);
556 MsanPoisonStackFn =
557 M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
558 IRB.getInt8PtrTy(), IntptrTy);
559 MsanChainOriginFn = M.getOrInsertFunction(
560 "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
561 MemmoveFn = M.getOrInsertFunction(
562 "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
563 IRB.getInt8PtrTy(), IntptrTy);
564 MemcpyFn = M.getOrInsertFunction(
565 "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
566 IntptrTy);
567 MemsetFn = M.getOrInsertFunction(
568 "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
569 IntptrTy);
570
571 // Create globals.
572 RetvalTLS = new GlobalVariable(
573 M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
574 GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
575 GlobalVariable::InitialExecTLSModel);
576 RetvalOriginTLS = new GlobalVariable(
577 M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
578 "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
579
580 ParamTLS = new GlobalVariable(
581 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
582 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
583 GlobalVariable::InitialExecTLSModel);
584 ParamOriginTLS = new GlobalVariable(
585 M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
586 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
587 nullptr, GlobalVariable::InitialExecTLSModel);
588
589 VAArgTLS = new GlobalVariable(
590 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
591 GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
592 GlobalVariable::InitialExecTLSModel);
593 VAArgOverflowSizeTLS = new GlobalVariable(
594 M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
595 "__msan_va_arg_overflow_size_tls", nullptr,
596 GlobalVariable::InitialExecTLSModel);
597 OriginTLS = new GlobalVariable(
598 M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
599 "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
600
601 // We insert an empty inline asm after __msan_report* to avoid callback merge.
602 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
603 StringRef(""), StringRef(""),
604 /*hasSideEffects=*/true);
605}
606
607/// \brief Module-level initialization.
608///
609/// Inserts a call to __msan_init into the module's constructor list.
610bool MemorySanitizer::doInitialization(Module &M) {
611 auto &DL = M.getDataLayout();
612
613 bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
614 bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
615 // Check the overrides first
616 if (ShadowPassed || OriginPassed) {
617 CustomMapParams.AndMask = ClAndMask;
618 CustomMapParams.XorMask = ClXorMask;
619 CustomMapParams.ShadowBase = ClShadowBase;
620 CustomMapParams.OriginBase = ClOriginBase;
621 MapParams = &CustomMapParams;
622 } else {
623 Triple TargetTriple(M.getTargetTriple());
624 switch (TargetTriple.getOS()) {
625 case Triple::FreeBSD:
626 switch (TargetTriple.getArch()) {
627 case Triple::x86_64:
628 MapParams = FreeBSD_X86_MemoryMapParams.bits64;
629 break;
630 case Triple::x86:
631 MapParams = FreeBSD_X86_MemoryMapParams.bits32;
632 break;
633 default:
634 report_fatal_error("unsupported architecture");
635 }
636 break;
637 case Triple::NetBSD:
638 switch (TargetTriple.getArch()) {
639 case Triple::x86_64:
640 MapParams = NetBSD_X86_MemoryMapParams.bits64;
641 break;
642 default:
643 report_fatal_error("unsupported architecture");
644 }
645 break;
646 case Triple::Linux:
647 switch (TargetTriple.getArch()) {
648 case Triple::x86_64:
649 MapParams = Linux_X86_MemoryMapParams.bits64;
650 break;
651 case Triple::x86:
652 MapParams = Linux_X86_MemoryMapParams.bits32;
653 break;
654 case Triple::mips64:
655 case Triple::mips64el:
656 MapParams = Linux_MIPS_MemoryMapParams.bits64;
657 break;
658 case Triple::ppc64:
659 case Triple::ppc64le:
660 MapParams = Linux_PowerPC_MemoryMapParams.bits64;
661 break;
662 case Triple::aarch64:
663 case Triple::aarch64_be:
664 MapParams = Linux_ARM_MemoryMapParams.bits64;
665 break;
666 default:
667 report_fatal_error("unsupported architecture");
668 }
669 break;
670 default:
671 report_fatal_error("unsupported operating system");
672 }
673 }
674
675 C = &(M.getContext());
676 IRBuilder<> IRB(*C);
677 IntptrTy = IRB.getIntPtrTy(DL);
678 OriginTy = IRB.getInt32Ty();
679
680 ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
681 OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
682
683 std::tie(MsanCtorFunction, std::ignore) =
684 createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
685 /*InitArgTypes=*/{},
686 /*InitArgs=*/{});
687 if (ClWithComdat) {
688 Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
689 MsanCtorFunction->setComdat(MsanCtorComdat);
690 appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
691 } else {
692 appendToGlobalCtors(M, MsanCtorFunction, 0);
693 }
694
695
696 if (TrackOrigins)
697 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
698 IRB.getInt32(TrackOrigins), "__msan_track_origins");
699
700 if (Recover)
701 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
702 IRB.getInt32(Recover), "__msan_keep_going");
703
704 return true;
705}
706
707namespace {
708
709/// \brief A helper class that handles instrumentation of VarArg
710/// functions on a particular platform.
711///
712/// Implementations are expected to insert the instrumentation
713/// necessary to propagate argument shadow through VarArg function
714/// calls. Visit* methods are called during an InstVisitor pass over
715/// the function, and should avoid creating new basic blocks. A new
716/// instance of this class is created for each instrumented function.
717struct VarArgHelper {
718 virtual ~VarArgHelper() = default;
719
720 /// \brief Visit a CallSite.
721 virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
722
723 /// \brief Visit a va_start call.
724 virtual void visitVAStartInst(VAStartInst &I) = 0;
725
726 /// \brief Visit a va_copy call.
727 virtual void visitVACopyInst(VACopyInst &I) = 0;
728
729 /// \brief Finalize function instrumentation.
730 ///
731 /// This method is called after visiting all interesting (see above)
732 /// instructions in a function.
733 virtual void finalizeInstrumentation() = 0;
734};
735
736struct MemorySanitizerVisitor;
737
738} // end anonymous namespace
739
740static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
741 MemorySanitizerVisitor &Visitor);
742
743static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
744 if (TypeSize <= 8) return 0;
745 return Log2_32_Ceil((TypeSize + 7) / 8);
746}
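
// Illustrative aside (not part of MemorySanitizer.cpp): TypeSizeToSizeIndex
// maps a type size in bits to log2 of the access size in bytes, the index
// used for MaybeWarningFn/MaybeStoreOriginFn. A standalone re-statement with
// Log2_32_Ceil replaced by a plain loop, plus a few spot checks:
#include <cassert>

static unsigned sizeIndexSketch(unsigned TypeSizeBits) {
  if (TypeSizeBits <= 8) return 0;
  unsigned Bytes = (TypeSizeBits + 7) / 8, Idx = 0;
  while ((1u << Idx) < Bytes) ++Idx; // ceil(log2(Bytes))
  return Idx;
}

int main() {
  assert(sizeIndexSketch(1) == 0 && sizeIndexSketch(8) == 0); // 1-byte access
  assert(sizeIndexSketch(16) == 1);                           // 2 bytes
  assert(sizeIndexSketch(32) == 2 && sizeIndexSketch(64) == 3);
  // Indices >= kNumberOfAccessSizes (4) fall back to inline instrumentation.
}
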
747
748namespace {
749
750/// This class does all the work for a given function. Store and Load
751/// instructions store and load corresponding shadow and origin
752/// values. Most instructions propagate shadow from arguments to their
753/// return values. Certain instructions (most importantly, BranchInst)
754/// test their argument shadow and print reports (with a runtime call) if it's
755/// non-zero.
756struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
757 Function &F;
758 MemorySanitizer &MS;
759 SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
760 ValueMap<Value*, Value*> ShadowMap, OriginMap;
761 std::unique_ptr<VarArgHelper> VAHelper;
762 const TargetLibraryInfo *TLI;
763 BasicBlock *ActualFnStart;
764
765 // The following flags disable parts of MSan instrumentation based on
766 // blacklist contents and command-line options.
767 bool InsertChecks;
768 bool PropagateShadow;
769 bool PoisonStack;
770 bool PoisonUndef;
771 bool CheckReturnValue;
772
773 struct ShadowOriginAndInsertPoint {
774 Value *Shadow;
775 Value *Origin;
776 Instruction *OrigIns;
777
778 ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
779 : Shadow(S), Origin(O), OrigIns(I) {}
780 };
781 SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
782 SmallVector<StoreInst *, 16> StoreList;
783
784 MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
785 : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
786 bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
787 InsertChecks = SanitizeFunction;
788 PropagateShadow = SanitizeFunction;
789 PoisonStack = SanitizeFunction && ClPoisonStack;
790 PoisonUndef = SanitizeFunction && ClPoisonUndef;
791 // FIXME: Consider using SpecialCaseList to specify a list of functions that
792 // must always return fully initialized values. For now, we hardcode "main".
793 CheckReturnValue = SanitizeFunction && (F.getName() == "main");
794 TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
795
796 MS.initializeCallbacks(*F.getParent());
797 ActualFnStart = &F.getEntryBlock();
798
799 DEBUG(if (!InsertChecks)
800 dbgs() << "MemorySanitizer is not inserting checks into '"
801 << F.getName() << "'\n");
802 }
803
804 Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
805 if (MS.TrackOrigins <= 1) return V;
806 return IRB.CreateCall(MS.MsanChainOriginFn, V);
807 }
808
809 Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
810 const DataLayout &DL = F.getParent()->getDataLayout();
811 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
812 if (IntptrSize == kOriginSize) return Origin;
813 assert(IntptrSize == kOriginSize * 2);
814 Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
815 return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
816 }
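
// Illustrative aside (not part of MemorySanitizer.cpp): on a 64-bit target
// originToIntptr replicates the 4-byte origin id into both halves of the
// word, so one intptr-sized store paints two adjacent origin slots.
// A standalone equivalent of that arithmetic:
#include <cassert>
#include <cstdint>

static uint64_t originToIntptr64(uint32_t Origin) {
  uint64_t O = Origin;  // zero-extend, as the CreateIntCast above does
  return O | (O << 32); // kOriginSize * 8 == 32
}

int main() {
  assert(originToIntptr64(0xAABBCCDDu) == 0xAABBCCDDAABBCCDDull);
}
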
817
818 /// \brief Fill memory range with the given origin value.
819 void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
820 unsigned Size, unsigned Alignment) {
821 const DataLayout &DL = F.getParent()->getDataLayout();
822 unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
823 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
824 assert(IntptrAlignment >= kMinOriginAlignment);
825 assert(IntptrSize >= kOriginSize);
826
827 unsigned Ofs = 0;
828 unsigned CurrentAlignment = Alignment;
829 if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
830 Value *IntptrOrigin = originToIntptr(IRB, Origin);
831 Value *IntptrOriginPtr =
832 IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
833 for (unsigned i = 0; i < Size / IntptrSize; ++i) {
834 Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
835 : IntptrOriginPtr;
836 IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
837 Ofs += IntptrSize / kOriginSize;
838 CurrentAlignment = IntptrAlignment;
839 }
840 }
841
842 for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
843 Value *GEP =
844 i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
845 IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
846 CurrentAlignment = kMinOriginAlignment;
847 }
848 }
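
// Illustrative trace (not part of MemorySanitizer.cpp) of paintOrigin for
// assumed values Size = 12, kOriginSize = 4, IntptrSize = 8, and
// Alignment >= IntptrAlignment:
//   wide loop: i = 0 -> one 8-byte store of the replicated origin;
//              Ofs becomes 8 / 4 = 2 (two 4-byte slots painted)
//   tail loop: i = 2 .. (12 + 3) / 4 - 1 = 2 -> one 4-byte store at slot 2
// Net effect: 12 bytes of origin painted with two stores instead of three.
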
849
850 void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
851 Value *OriginPtr, unsigned Alignment, bool AsCall) {
852 const DataLayout &DL = F.getParent()->getDataLayout();
853 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
854 unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
855 if (Shadow->getType()->isAggregateType()) {
856 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
857 OriginAlignment);
858 } else {
859 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
860 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
861 if (ConstantShadow) {
862 if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
863 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
864 OriginAlignment);
865 return;
866 }
867
868 unsigned TypeSizeInBits =
869 DL.getTypeSizeInBits(ConvertedShadow->getType());
870 unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
871 if (AsCall && SizeIndex < kNumberOfAccessSizes) {
872 Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
873 Value *ConvertedShadow2 = IRB.CreateZExt(
874 ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
875 IRB.CreateCall(Fn, {ConvertedShadow2,
876 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
877 Origin});
878 } else {
879 Value *Cmp = IRB.CreateICmpNE(
880 ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
881 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
882 Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
883 IRBuilder<> IRBNew(CheckTerm);
884 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
885 OriginAlignment);
886 }
887 }
888 }
889
890 void materializeStores(bool InstrumentWithCalls) {
891 for (StoreInst *SI : StoreList) {
892 IRBuilder<> IRB(SI);
893 Value *Val = SI->getValueOperand();
894 Value *Addr = SI->getPointerOperand();
895 Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
896 Value *ShadowPtr, *OriginPtr;
897 Type *ShadowTy = Shadow->getType();
898 unsigned Alignment = SI->getAlignment();
899 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
900 std::tie(ShadowPtr, OriginPtr) =
901 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
902
903 StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
904 DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
905
906 if (ClCheckAccessAddress)
907 insertShadowCheck(Addr, NewSI);
908
909 if (SI->isAtomic())
910 SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
911
912 if (MS.TrackOrigins && !SI->isAtomic())
913 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
914 OriginAlignment, InstrumentWithCalls);
915 }
916 }
917
918 /// \brief Helper function to insert a warning at IRB's current insert point.
919 void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
920 if (!Origin)
921 Origin = (Value *)IRB.getInt32(0);
922 if (MS.TrackOrigins) {
923 IRB.CreateStore(Origin, MS.OriginTLS);
924 }
925 IRB.CreateCall(MS.WarningFn, {});
926 IRB.CreateCall(MS.EmptyAsm, {});
927 // FIXME: Insert UnreachableInst if !MS.Recover?
928 // This may invalidate some of the following checks and needs to be done
929 // at the very end.
930 }
931
932 void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
933 bool AsCall) {
934 IRBuilder<> IRB(OrigIns);
935 DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
936 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
937 DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
938
939 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
940 if (ConstantShadow) {
941 if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
942 insertWarningFn(IRB, Origin);
943 }
944 return;
945 }
946
947 const DataLayout &DL = OrigIns->getModule()->getDataLayout();
948
949 unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
950 unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
951 if (AsCall && SizeIndex < kNumberOfAccessSizes) {
952 Value *Fn = MS.MaybeWarningFn[SizeIndex];
953 Value *ConvertedShadow2 =
954 IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
955 IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
956 ? Origin
957 : (Value *)IRB.getInt32(0)});
958 } else {
959 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
960 getCleanShadow(ConvertedShadow), "_mscmp");
961 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
962 Cmp, OrigIns,
963 /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
964
965 IRB.SetInsertPoint(CheckTerm);
966 insertWarningFn(IRB, Origin);
967 DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
968 }
969 }
970
971 void materializeChecks(bool InstrumentWithCalls) {
972 for (const auto &ShadowData : InstrumentationList) {
973 Instruction *OrigIns = ShadowData.OrigIns;
974 Value *Shadow = ShadowData.Shadow;
975 Value *Origin = ShadowData.Origin;
976 materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
977 }
978 DEBUG(dbgs() << "DONE:\n" << F);
979 }
980
981 /// \brief Add MemorySanitizer instrumentation to a function.
982 bool runOnFunction() {
983 // In the presence of unreachable blocks, we may see Phi nodes with
984 // incoming nodes from such blocks. Since InstVisitor skips unreachable
985 // blocks, such nodes will not have any shadow value associated with them.
986 // It's easier to remove unreachable blocks than deal with missing shadow.
987 removeUnreachableBlocks(F);
988
989 // Iterate all BBs in depth-first order and create shadow instructions
990 // for all instructions (where applicable).
991 // For PHI nodes we create dummy shadow PHIs which will be finalized later.
992 for (BasicBlock *BB : depth_first(ActualFnStart))
993 visit(*BB);
994
995 // Finalize PHI nodes.
996 for (PHINode *PN : ShadowPHINodes) {
997 PHINode *PNS = cast<PHINode>(getShadow(PN));
998 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
999 size_t NumValues = PN->getNumIncomingValues();
1000 for (size_t v = 0; v < NumValues; v++) {
1001 PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1002 if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1003 }
1004 }
1005
1006 VAHelper->finalizeInstrumentation();
1007
1008 bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
1009 InstrumentationList.size() + StoreList.size() >
1010 (unsigned)ClInstrumentationWithCallThreshold;
1011
1012 // Delayed instrumentation of StoreInst.
1013 // This may add new checks to be inserted later.
1014 materializeStores(InstrumentWithCalls);
1015
1016 // Insert shadow value checks.
1017 materializeChecks(InstrumentWithCalls);
1018
1019 return true;
1020 }
1021
1022 /// \brief Compute the shadow type that corresponds to a given Value.
1023 Type *getShadowTy(Value *V) {
1024 return getShadowTy(V->getType());
1025 }
1026
1027 /// \brief Compute the shadow type that corresponds to a given Type.
1028 Type *getShadowTy(Type *OrigTy) {
1029 if (!OrigTy->isSized()) {
1030 return nullptr;
1031 }
1032 // For integer type, shadow is the same as the original type.
1033 // This may return weird-sized types like i1.
1034 if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1035 return IT;
1036 const DataLayout &DL = F.getParent()->getDataLayout();
1037 if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1038 uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1039 return VectorType::get(IntegerType::get(*MS.C, EltSize),
1040 VT->getNumElements());
1041 }
1042 if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1043 return ArrayType::get(getShadowTy(AT->getElementType()),
1044 AT->getNumElements());
1045 }
1046 if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1047 SmallVector<Type*, 4> Elements;
1048 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1049 Elements.push_back(getShadowTy(ST->getElementType(i)));
1050 StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1051 DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1052 return Res;
1053 }
1054 uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1055 return IntegerType::get(*MS.C, TypeSize);
1056 }
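
// Illustrative examples (not part of MemorySanitizer.cpp) of the mapping
// getShadowTy implements, derived from the cases above:
//   i32         -> i32         (integers map to themselves)
//   <4 x float> -> <4 x i32>   (integer vector of equal element width)
//   [8 x i16]   -> [8 x i16]   (element-wise recursion)
//   {i8, i64}   -> {i8, i64}   (field-wise recursion)
//   float       -> i32         (fallback: integer of the same bit width)
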
1057
1058 /// \brief Flatten a vector type.
1059 Type *getShadowTyNoVec(Type *ty) {
1060 if (VectorType *vt = dyn_cast<VectorType>(ty))
1061 return IntegerType::get(*MS.C, vt->getBitWidth());
1062 return ty;
1063 }
1064
1065 /// \brief Convert a shadow value to its flattened variant.
1066 Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
1067 Type *Ty = V->getType();
1068 Type *NoVecTy = getShadowTyNoVec(Ty);
1069 if (Ty == NoVecTy) return V;
1070 return IRB.CreateBitCast(V, NoVecTy);
1071 }
1072
1073 /// \brief Compute the integer shadow offset that corresponds to a given
1074 /// application address.
1075 ///
1076 /// Offset = (Addr & ~AndMask) ^ XorMask
1077 Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1078 Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
1079
1080 uint64_t AndMask = MS.MapParams->AndMask;
1081 if (AndMask)
1082 OffsetLong =
1083 IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
1084
1085 uint64_t XorMask = MS.MapParams->XorMask;
1086 if (XorMask)
1087 OffsetLong =
1088 IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
1089 return OffsetLong;
1090 }
1091
1092 /// \brief Compute the shadow and origin addresses corresponding to a given
1093 /// application address.
1094 ///
1095 /// Shadow = ShadowBase + Offset
1096 /// Origin = (OriginBase + Offset) & ~3ULL
1097 std::pair<Value *, Value *> getShadowOriginPtrUserspace(
1098 Value *Addr, IRBuilder<> &IRB, Type *ShadowTy, unsigned Alignment,
1099 Instruction **FirstInsn) {
1100 Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1101 Value *ShadowLong = ShadowOffset;
1102 uint64_t ShadowBase = MS.MapParams->ShadowBase;
1103 *FirstInsn = dyn_cast<Instruction>(ShadowLong);
1104 if (ShadowBase != 0) {
1105 ShadowLong =
1106 IRB.CreateAdd(ShadowLong,
1107 ConstantInt::get(MS.IntptrTy, ShadowBase));
1108 }
1109 Value *ShadowPtr =
1110 IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
1111 Value *OriginPtr = nullptr;
1112 if (MS.TrackOrigins) {
1113 Value *OriginLong = ShadowOffset;
1114 uint64_t OriginBase = MS.MapParams->OriginBase;
1115 if (OriginBase != 0)
1116 OriginLong = IRB.CreateAdd(OriginLong,
1117 ConstantInt::get(MS.IntptrTy, OriginBase));
1118 if (Alignment < kMinOriginAlignment) {
1119 uint64_t Mask = kMinOriginAlignment - 1;
1120 OriginLong =
1121 IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
1122 }
1123 OriginPtr =
1124 IRB.CreateIntToPtr(OriginLong, PointerType::get(IRB.getInt32Ty(), 0));
1125 }
1126 return std::make_pair(ShadowPtr, OriginPtr);
1127 }
1128
1129 std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1130 Type *ShadowTy,
1131 unsigned Alignment,
1132 bool isStore) {
1133 Instruction *FirstInsn = nullptr;
1134 std::pair<Value *, Value *> ret =
1135 getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment, &FirstInsn);
1136 return ret;
1137 }
1138
1139 /// \brief Compute the shadow address for a given function argument.
1140 ///
1141 /// Shadow = ParamTLS+ArgOffset.
1142 Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
1143 int ArgOffset) {
1144 Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1145 if (ArgOffset)
1146 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1147 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1148 "_msarg");
1149 }
1150
1151 /// \brief Compute the origin address for a given function argument.
1152 Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1153 int ArgOffset) {
1154 if (!MS.TrackOrigins) return nullptr;
1155 Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1156 if (ArgOffset)
1157 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1158 return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1159 "_msarg_o");
1160 }
1161
1162 /// \brief Compute the shadow address for a retval.
1163 Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1164 return IRB.CreatePointerCast(MS.RetvalTLS,
1165 PointerType::get(getShadowTy(A), 0),
1166 "_msret");
1167 }
1168
1169 /// \brief Compute the origin address for a retval.
1170 Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1171 // We keep a single origin for the entire retval. Might be too optimistic.
1172 return MS.RetvalOriginTLS;
1173 }
1174
1175 /// \brief Set SV to be the shadow value for V.
1176 void setShadow(Value *V, Value *SV) {
1177 assert(!ShadowMap.count(V) && "Values may only have one shadow");
1178 ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1179 }
1180
1181 /// \brief Set Origin to be the origin value for V.
1182 void setOrigin(Value *V, Value *Origin) {
1183 if (!MS.TrackOrigins) return;
1184 assert(!OriginMap.count(V) && "Values may only have one origin");
1185 DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
1186 OriginMap[V] = Origin;
1187 }
1188
1189 Constant *getCleanShadow(Type *OrigTy) {
1190 Type *ShadowTy = getShadowTy(OrigTy);
1191 if (!ShadowTy)
1192 return nullptr;
1193 return Constant::getNullValue(ShadowTy);
1194 }
1195
1196 /// \brief Create a clean shadow value for a given value.
1197 ///
1198 /// Clean shadow (all zeroes) means all bits of the value are defined
1199 /// (initialized).
1200 Constant *getCleanShadow(Value *V) {
1201 return getCleanShadow(V->getType());
1202 }
1203
1204 /// \brief Create a dirty shadow of a given shadow type.
1205 Constant *getPoisonedShadow(Type *ShadowTy) {
1206 assert(ShadowTy);
1207 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1208 return Constant::getAllOnesValue(ShadowTy);
1209 if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1210 SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1211 getPoisonedShadow(AT->getElementType()));
1212 return ConstantArray::get(AT, Vals);
1213 }
1214 if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1215 SmallVector<Constant *, 4> Vals;
1216 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1217 Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1218 return ConstantStruct::get(ST, Vals);
1219 }
1220 llvm_unreachable("Unexpected shadow type");
1221 }
1222
1223 /// \brief Create a dirty shadow for a given value.
1224 Constant *getPoisonedShadow(Value *V) {
1225 Type *ShadowTy = getShadowTy(V);
1226 if (!ShadowTy)
1227 return nullptr;
1228 return getPoisonedShadow(ShadowTy);
1229 }
1230
1231 /// \brief Create a clean (zero) origin.
1232 Value *getCleanOrigin() {
1233 return Constant::getNullValue(MS.OriginTy);
1234 }
1235
1236 /// \brief Get the shadow value for a given Value.
1237 ///
1238 /// This function either returns the value set earlier with setShadow,
1239 /// or extracts it from ParamTLS (for function arguments).
1240 Value *getShadow(Value *V) {
1241 if (!PropagateShadow) return getCleanShadow(V);
1242 if (Instruction *I = dyn_cast<Instruction>(V)) {
1243 if (I->getMetadata("nosanitize"))
1244 return getCleanShadow(V);
1245 // For instructions the shadow is already stored in the map.
1246 Value *Shadow = ShadowMap[V];
1247 if (!Shadow) {
1248 DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1249 (void)I;
1250 assert(Shadow && "No shadow for a value");
1251 }
1252 return Shadow;
1253 }
1254 if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1255 Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1256 DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1257 (void)U;
1258 return AllOnes;
1259 }
1260 if (Argument *A = dyn_cast<Argument>(V)) {
1261 // For arguments we compute the shadow on demand and store it in the map.
1262 Value **ShadowPtr = &ShadowMap[V];
1263 if (*ShadowPtr)
1264 return *ShadowPtr;
1265 Function *F = A->getParent();
1266 IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI());
1267 unsigned ArgOffset = 0;
1268 const DataLayout &DL = F->getParent()->getDataLayout();
1269 for (auto &FArg : F->args()) {
1270 if (!FArg.getType()->isSized()) {
1271 DEBUG(dbgs() << "Arg is not sized\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("msan")) { dbgs() << "Arg is not sized\n"; } } while (
false)
;
1272 continue;
1273 }
1274 unsigned Size =
1275 FArg.hasByValAttr()
1276 ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
1277 : DL.getTypeAllocSize(FArg.getType());
1278 if (A == &FArg) {
1279 bool Overflow = ArgOffset + Size > kParamTLSSize;
1280 Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1281 if (FArg.hasByValAttr()) {
1282 // ByVal pointer itself has clean shadow. We copy the actual
1283 // argument shadow to the underlying memory.
1284 // Figure out maximal valid memcpy alignment.
1285 unsigned ArgAlign = FArg.getParamAlignment();
1286 if (ArgAlign == 0) {
1287 Type *EltType = A->getType()->getPointerElementType();
1288 ArgAlign = DL.getABITypeAlignment(EltType);
1289 }
1290 Value *CpShadowPtr =
1291 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1292 /*isStore*/ true)
1293 .first;
1294 if (Overflow) {
1295 // ParamTLS overflow.
1296 EntryIRB.CreateMemSet(
1297 CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
1298 Size, ArgAlign);
1299 } else {
1300 unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1301 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1302 CopyAlign, Size);
1303 DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("msan")) { dbgs() << " ByValCpy: " << *Cpy <<
"\n"; } } while (false)
;
1304 (void)Cpy;
1305 }
1306 *ShadowPtr = getCleanShadow(V);
1307 } else {
1308 if (Overflow) {
1309 // ParamTLS overflow.
1310 *ShadowPtr = getCleanShadow(V);
1311 } else {
1312 *ShadowPtr =
1313 EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
1314 }
1315 }
1316 DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("msan")) { dbgs() << " ARG: " << FArg <<
" ==> " << **ShadowPtr << "\n"; } } while (false
)
1317 **ShadowPtr << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("msan")) { dbgs() << " ARG: " << FArg <<
" ==> " << **ShadowPtr << "\n"; } } while (false
)
;
1318 if (MS.TrackOrigins && !Overflow) {
1319 Value *OriginPtr =
1320 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1321 setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
1322 } else {
1323 setOrigin(A, getCleanOrigin());
1324 }
1325 }
1326 ArgOffset += alignTo(Size, kShadowTLSAlignment);
1327 }
1328 assert(*ShadowPtr && "Could not find shadow for an argument");
1329 return *ShadowPtr;
1330 }
1331 // For everything else the shadow is zero.
1332 return getCleanShadow(V);
1333 }
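// [Editor's note] A minimal standalone sketch (not part of the original
// source) of the ParamTLS offset arithmetic in the loop above, assuming
// kShadowTLSAlignment == 8 and kParamTLSSize == 800 (both defined earlier
// in this file, outside the excerpt shown here):
static unsigned alignToSketch(unsigned Size, unsigned Align) {
  // Same rounding-up as llvm::alignTo for a power-of-two alignment.
  return (Size + Align - 1) & ~(Align - 1);
}
// Arguments of 4, 12 and 32 bytes get shadow slots at offsets 0, 8 and 24:
// alignToSketch(4, 8) == 8 and alignToSketch(12, 8) == 16, so ArgOffset
// advances by the aligned size of each argument, and any argument whose slot
// would not fit in the 800-byte region takes the Overflow path above instead.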
1334
1335 /// \brief Get the shadow for i-th argument of the instruction I.
1336 Value *getShadow(Instruction *I, int i) {
1337 return getShadow(I->getOperand(i));
1338 }
1339
1340 /// \brief Get the origin for a value.
1341 Value *getOrigin(Value *V) {
1342 if (!MS.TrackOrigins) return nullptr;
1343 if (!PropagateShadow) return getCleanOrigin();
1344 if (isa<Constant>(V)) return getCleanOrigin();
1345 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1346 "Unexpected value type in getOrigin()");
1347 if (Instruction *I = dyn_cast<Instruction>(V)) {
1348 if (I->getMetadata("nosanitize"))
1349 return getCleanOrigin();
1350 }
1351 Value *Origin = OriginMap[V];
1352 assert(Origin && "Missing origin");
1353 return Origin;
1354 }
1355
1356 /// \brief Get the origin for i-th argument of the instruction I.
1357 Value *getOrigin(Instruction *I, int i) {
1358 return getOrigin(I->getOperand(i));
1359 }
1360
1361 /// \brief Remember the place where a shadow check should be inserted.
1362 ///
1363 /// This location will be later instrumented with a check that will print a
1364 /// UMR warning at runtime if the shadow value is not 0.
1365 void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1366 assert(Shadow);
1367 if (!InsertChecks) return;
1368#ifndef NDEBUG
1369 Type *ShadowTy = Shadow->getType();
1370 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1371 "Can only insert checks for integer and vector shadow types");
1372#endif
1373 InstrumentationList.push_back(
1374 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1375 }
1376
1377 /// \brief Remember the place where a shadow check should be inserted.
1378 ///
1379 /// This location will be later instrumented with a check that will print a
1380 /// UMR warning at runtime if the value is not fully defined.
1381 void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1382 assert(Val);
1383 Value *Shadow, *Origin;
1384 if (ClCheckConstantShadow) {
1385 Shadow = getShadow(Val);
1386 if (!Shadow) return;
1387 Origin = getOrigin(Val);
1388 } else {
1389 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1390 if (!Shadow) return;
1391 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1392 }
1393 insertShadowCheck(Shadow, Origin, OrigIns);
1394 }
1395
1396 AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1397 switch (a) {
1398 case AtomicOrdering::NotAtomic:
1399 return AtomicOrdering::NotAtomic;
1400 case AtomicOrdering::Unordered:
1401 case AtomicOrdering::Monotonic:
1402 case AtomicOrdering::Release:
1403 return AtomicOrdering::Release;
1404 case AtomicOrdering::Acquire:
1405 case AtomicOrdering::AcquireRelease:
1406 return AtomicOrdering::AcquireRelease;
1407 case AtomicOrdering::SequentiallyConsistent:
1408 return AtomicOrdering::SequentiallyConsistent;
1409 }
1410 llvm_unreachable("Unknown ordering")::llvm::llvm_unreachable_internal("Unknown ordering", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Instrumentation/MemorySanitizer.cpp"
, 1410)
;
1411 }
1412
1413 AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1414 switch (a) {
1415 case AtomicOrdering::NotAtomic:
1416 return AtomicOrdering::NotAtomic;
1417 case AtomicOrdering::Unordered:
1418 case AtomicOrdering::Monotonic:
1419 case AtomicOrdering::Acquire:
1420 return AtomicOrdering::Acquire;
1421 case AtomicOrdering::Release:
1422 case AtomicOrdering::AcquireRelease:
1423 return AtomicOrdering::AcquireRelease;
1424 case AtomicOrdering::SequentiallyConsistent:
1425 return AtomicOrdering::SequentiallyConsistent;
1426 }
1427 llvm_unreachable("Unknown ordering")::llvm::llvm_unreachable_internal("Unknown ordering", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Instrumentation/MemorySanitizer.cpp"
, 1427)
;
1428 }
1429
1430 // ------------------- Visitors.
1431 using InstVisitor<MemorySanitizerVisitor>::visit;
1432 void visit(Instruction &I) {
1433 if (!I.getMetadata("nosanitize"))
1434 InstVisitor<MemorySanitizerVisitor>::visit(I);
1435 }
1436
1437 /// \brief Instrument LoadInst
1438 ///
1439 /// Loads the corresponding shadow and (optionally) origin.
1440 /// Optionally, checks that the load address is fully defined.
1441 void visitLoadInst(LoadInst &I) {
1442 assert(I.getType()->isSized() && "Load type must have size");
1443 assert(!I.getMetadata("nosanitize"));
1444 IRBuilder<> IRB(I.getNextNode());
1445 Type *ShadowTy = getShadowTy(&I);
1446 Value *Addr = I.getPointerOperand();
1447 Value *ShadowPtr, *OriginPtr;
1448 unsigned Alignment = I.getAlignment();
1449 if (PropagateShadow) {
1450 std::tie(ShadowPtr, OriginPtr) =
1451 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
1452 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
1453 } else {
1454 setShadow(&I, getCleanShadow(&I));
1455 }
1456
1457 if (ClCheckAccessAddress)
1458 insertShadowCheck(I.getPointerOperand(), &I);
1459
1460 if (I.isAtomic())
1461 I.setOrdering(addAcquireOrdering(I.getOrdering()));
1462
1463 if (MS.TrackOrigins) {
1464 if (PropagateShadow) {
1465 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1466 setOrigin(&I, IRB.CreateAlignedLoad(OriginPtr, OriginAlignment));
1467 } else {
1468 setOrigin(&I, getCleanOrigin());
1469 }
1470 }
1471 }
1472
1473 /// \brief Instrument StoreInst
1474 ///
1475 /// Stores the corresponding shadow and (optionally) origin.
1476 /// Optionally, checks that the store address is fully defined.
1477 void visitStoreInst(StoreInst &I) {
1478 StoreList.push_back(&I);
1479 }
1480
1481 void handleCASOrRMW(Instruction &I) {
1482 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1483
1484 IRBuilder<> IRB(&I);
1485 Value *Addr = I.getOperand(0);
1486 Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.getType(),
1487 /*Alignment*/ 1, /*isStore*/ true)
1488 .first;
1489
1490 if (ClCheckAccessAddress)
1491 insertShadowCheck(Addr, &I);
1492
1493 // Only test the conditional argument of cmpxchg instruction.
1494 // The other argument can potentially be uninitialized, but we can not
1495 // detect this situation reliably without possible false positives.
1496 if (isa<AtomicCmpXchgInst>(I))
1497 insertShadowCheck(I.getOperand(1), &I);
1498
1499 IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1500
1501 setShadow(&I, getCleanShadow(&I));
1502 setOrigin(&I, getCleanOrigin());
1503 }
1504
1505 void visitAtomicRMWInst(AtomicRMWInst &I) {
1506 handleCASOrRMW(I);
1507 I.setOrdering(addReleaseOrdering(I.getOrdering()));
1508 }
1509
1510 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1511 handleCASOrRMW(I);
1512 I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1513 }
1514
1515 // Vector manipulation.
1516 void visitExtractElementInst(ExtractElementInst &I) {
1517 insertShadowCheck(I.getOperand(1), &I);
1518 IRBuilder<> IRB(&I);
1519 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1520 "_msprop"));
1521 setOrigin(&I, getOrigin(&I, 0));
1522 }
1523
1524 void visitInsertElementInst(InsertElementInst &I) {
1525 insertShadowCheck(I.getOperand(2), &I);
1526 IRBuilder<> IRB(&I);
1527 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1528 I.getOperand(2), "_msprop"));
1529 setOriginForNaryOp(I);
1530 }
1531
1532 void visitShuffleVectorInst(ShuffleVectorInst &I) {
1533 insertShadowCheck(I.getOperand(2), &I);
1534 IRBuilder<> IRB(&I);
1535 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1536 I.getOperand(2), "_msprop"));
1537 setOriginForNaryOp(I);
1538 }
1539
1540 // Casts.
1541 void visitSExtInst(SExtInst &I) {
1542 IRBuilder<> IRB(&I);
1543 setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1544 setOrigin(&I, getOrigin(&I, 0));
1545 }
1546
1547 void visitZExtInst(ZExtInst &I) {
1548 IRBuilder<> IRB(&I);
1549 setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1550 setOrigin(&I, getOrigin(&I, 0));
1551 }
1552
1553 void visitTruncInst(TruncInst &I) {
1554 IRBuilder<> IRB(&I);
1555 setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1556 setOrigin(&I, getOrigin(&I, 0));
1557 }
1558
1559 void visitBitCastInst(BitCastInst &I) {
1560 // Special case: if this is the bitcast (there is exactly 1 allowed) between
1561 // a musttail call and a ret, don't instrument. New instructions are not
1562 // allowed after a musttail call.
1563 if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1564 if (CI->isMustTailCall())
1565 return;
1566 IRBuilder<> IRB(&I);
1567 setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1568 setOrigin(&I, getOrigin(&I, 0));
1569 }
1570
1571 void visitPtrToIntInst(PtrToIntInst &I) {
1572 IRBuilder<> IRB(&I);
1573 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1574 "_msprop_ptrtoint"));
1575 setOrigin(&I, getOrigin(&I, 0));
1576 }
1577
1578 void visitIntToPtrInst(IntToPtrInst &I) {
1579 IRBuilder<> IRB(&I);
1580 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1581 "_msprop_inttoptr"));
1582 setOrigin(&I, getOrigin(&I, 0));
1583 }
1584
1585 void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1586 void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1587 void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1588 void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1589 void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1590 void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1591
1592 /// \brief Propagate shadow for bitwise AND.
1593 ///
1594 /// This code is exact, i.e. if, for example, a bit in the left argument
1595 /// is defined and 0, then neither the value nor the definedness of the
1596 /// corresponding bit in B affects the resulting shadow.
1597 void visitAnd(BinaryOperator &I) {
1598 IRBuilder<> IRB(&I);
1599 // "And" of 0 and a poisoned value results in unpoisoned value.
1600 // 1&1 => 1; 0&1 => 0; p&1 => p;
1601 // 1&0 => 0; 0&0 => 0; p&0 => 0;
1602 // 1&p => p; 0&p => 0; p&p => p;
1603 // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
1604 Value *S1 = getShadow(&I, 0);
1605 Value *S2 = getShadow(&I, 1);
1606 Value *V1 = I.getOperand(0);
1607 Value *V2 = I.getOperand(1);
1608 if (V1->getType() != S1->getType()) {
1609 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1610 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1611 }
1612 Value *S1S2 = IRB.CreateAnd(S1, S2);
1613 Value *V1S2 = IRB.CreateAnd(V1, S2);
1614 Value *S1V2 = IRB.CreateAnd(S1, V2);
1615 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1616 setOriginForNaryOp(I);
1617 }
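// [Editor's note] A standalone illustration (not part of the original
// source) of the visitAnd() shadow rule on concrete 8-bit values, where a
// set shadow bit means "uninitialized":
static unsigned andShadowSketch(unsigned V1, unsigned S1,
                                unsigned V2, unsigned S2) {
  // S = (S1 & S2) | (V1 & S2) | (S1 & V2), exactly as computed above.
  return (S1 & S2) | (V1 & S2) | (S1 & V2);
}
// andShadowSketch(/*V1=*/0x00, /*S1=*/0x00, /*V2=*/0xAB, /*S2=*/0xFF) == 0:
// AND with a fully defined zero is fully defined regardless of the other
// operand's shadow, matching the "0&p => 0" row of the table above.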
1618
1619 void visitOr(BinaryOperator &I) {
1620 IRBuilder<> IRB(&I);
1621 // "Or" of 1 and a poisoned value results in unpoisoned value.
1622 // 1|1 => 1; 0|1 => 1; p|1 => 1;
1623 // 1|0 => 1; 0|0 => 0; p|0 => p;
1624 // 1|p => 1; 0|p => p; p|p => p;
1625 // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
1626 Value *S1 = getShadow(&I, 0);
1627 Value *S2 = getShadow(&I, 1);
1628 Value *V1 = IRB.CreateNot(I.getOperand(0));
1629 Value *V2 = IRB.CreateNot(I.getOperand(1));
1630 if (V1->getType() != S1->getType()) {
1631 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1632 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1633 }
1634 Value *S1S2 = IRB.CreateAnd(S1, S2);
1635 Value *V1S2 = IRB.CreateAnd(V1, S2);
1636 Value *S1V2 = IRB.CreateAnd(S1, V2);
1637 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1638 setOriginForNaryOp(I);
1639 }
1640
1641 /// \brief Default propagation of shadow and/or origin.
1642 ///
1643 /// This class implements the general case of shadow propagation, used in all
1644 /// cases where we don't know and/or don't care about what the operation
1645 /// actually does. It converts all input shadow values to a common type
1646 /// (extending or truncating as necessary), and bitwise OR's them.
1647 ///
1648 /// This is much cheaper than inserting checks (i.e. requiring inputs to be
1649 /// fully initialized), and less prone to false positives.
1650 ///
1651 /// This class also implements the general case of origin propagation. For a
1652 /// Nary operation, result origin is set to the origin of an argument that is
1653 /// not entirely initialized. If there is more than one such argument, the
1654 /// rightmost of them is picked. It does not matter which one is picked if all
1655 /// arguments are initialized.
1656 template <bool CombineShadow>
1657 class Combiner {
1658 Value *Shadow = nullptr;
1659 Value *Origin = nullptr;
1660 IRBuilder<> &IRB;
1661 MemorySanitizerVisitor *MSV;
1662
1663 public:
1664 Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
1665 : IRB(IRB), MSV(MSV) {}
1666
1667 /// \brief Add a pair of shadow and origin values to the mix.
1668 Combiner &Add(Value *OpShadow, Value *OpOrigin) {
1669 if (CombineShadow) {
1670 assert(OpShadow);
1671 if (!Shadow)
1672 Shadow = OpShadow;
1673 else {
1674 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
1675 Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
1676 }
1677 }
1678
1679 if (MSV->MS.TrackOrigins) {
1680 assert(OpOrigin);
1681 if (!Origin) {
1682 Origin = OpOrigin;
1683 } else {
1684 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
1685 // No point in adding something that might result in 0 origin value.
1686 if (!ConstOrigin || !ConstOrigin->isNullValue()) {
1687 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
1688 Value *Cond =
1689 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
1690 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1691 }
1692 }
1693 }
1694 return *this;
1695 }
1696
1697 /// \brief Add an application value to the mix.
1698 Combiner &Add(Value *V) {
1699 Value *OpShadow = MSV->getShadow(V);
1700 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
1701 return Add(OpShadow, OpOrigin);
1702 }
1703
1704 /// \brief Set the current combined values as the given instruction's shadow
1705 /// and origin.
1706 void Done(Instruction *I) {
1707 if (CombineShadow) {
1708 assert(Shadow);
1709 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
1710 MSV->setShadow(I, Shadow);
1711 }
1712 if (MSV->MS.TrackOrigins) {
1713 assert(Origin);
1714 MSV->setOrigin(I, Origin);
1715 }
1716 }
1717 };
1718
1719 using ShadowAndOriginCombiner = Combiner<true>;
1720 using OriginCombiner = Combiner<false>;
1721
1722 /// \brief Propagate origin for arbitrary operation.
1723 void setOriginForNaryOp(Instruction &I) {
1724 if (!MS.TrackOrigins) return;
1725 IRBuilder<> IRB(&I);
1726 OriginCombiner OC(this, IRB);
1727 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1728 OC.Add(OI->get());
1729 OC.Done(&I);
1730 }
1731
1732 size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
1733 assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
1734 "Vector of pointers is not a valid shadow type");
1735 return Ty->isVectorTy() ?
1736 Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
1737 Ty->getPrimitiveSizeInBits();
1738 }
1739
1740 /// \brief Cast between two shadow types, extending or truncating as
1741 /// necessary.
1742 Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
1743 bool Signed = false) {
1744 Type *srcTy = V->getType();
1745 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
1746 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
1747 if (srcSizeInBits > 1 && dstSizeInBits == 1)
1748 return IRB.CreateICmpNE(V, getCleanShadow(V));
1749
1750 if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
1751 return IRB.CreateIntCast(V, dstTy, Signed);
1752 if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
1753 dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
1754 return IRB.CreateIntCast(V, dstTy, Signed);
1755 Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
1756 Value *V2 =
1757 IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
1758 return IRB.CreateBitCast(V2, dstTy);
1759 // TODO: handle struct types.
1760 }
1761
1762 /// \brief Cast an application value to the type of its own shadow.
1763 Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
1764 Type *ShadowTy = getShadowTy(V);
1765 if (V->getType() == ShadowTy)
1766 return V;
1767 if (V->getType()->isPtrOrPtrVectorTy())
1768 return IRB.CreatePtrToInt(V, ShadowTy);
1769 else
1770 return IRB.CreateBitCast(V, ShadowTy);
1771 }
1772
1773 /// \brief Propagate shadow for arbitrary operation.
1774 void handleShadowOr(Instruction &I) {
1775 IRBuilder<> IRB(&I);
1776 ShadowAndOriginCombiner SC(this, IRB);
1777 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1778 SC.Add(OI->get());
1779 SC.Done(&I);
1780 }
1781
1782 // \brief Handle multiplication by constant.
1783 //
1784 // Handle a special case of multiplication by constant that may have one or
1785 // more zeros in the lower bits. This makes the corresponding number of lower bits
1786 // of the result zero as well. We model it by shifting the other operand
1787 // shadow left by the required number of bits. Effectively, we transform
1788 // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
1789 // We use multiplication by 2**N instead of shift to cover the case of
1790 // multiplication by 0, which may occur in some elements of a vector operand.
1791 void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
1792 Value *OtherArg) {
1793 Constant *ShadowMul;
1794 Type *Ty = ConstArg->getType();
1795 if (Ty->isVectorTy()) {
1796 unsigned NumElements = Ty->getVectorNumElements();
1797 Type *EltTy = Ty->getSequentialElementType();
1798 SmallVector<Constant *, 16> Elements;
1799 for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
1800 if (ConstantInt *Elt =
1801 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
1802 const APInt &V = Elt->getValue();
1803 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1804 Elements.push_back(ConstantInt::get(EltTy, V2));
1805 } else {
1806 Elements.push_back(ConstantInt::get(EltTy, 1));
1807 }
1808 }
1809 ShadowMul = ConstantVector::get(Elements);
1810 } else {
1811 if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
1812 const APInt &V = Elt->getValue();
1813 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1814 ShadowMul = ConstantInt::get(Ty, V2);
1815 } else {
1816 ShadowMul = ConstantInt::get(Ty, 1);
1817 }
1818 }
1819
1820 IRBuilder<> IRB(&I);
1821 setShadow(&I,
1822 IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
1823 setOrigin(&I, getOrigin(OtherArg));
1824 }
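// [Editor's note] A standalone sketch (not part of the original source) of
// the ShadowMul computation above for scalar constants; shadowMulSketch is a
// hypothetical name:
static unsigned shadowMulSketch(unsigned C) {
  // Count trailing zero bits of C; 32 when C == 0, mirroring how
  // countTrailingZeros() returns the full bit width for zero.
  unsigned TZ = 0;
  while (TZ < 32 && !((C >> TZ) & 1)) ++TZ;
  return TZ >= 32 ? 0u : 1u << TZ;  // 2**TZ, which degenerates to 0 for C == 0
}
// shadowMulSketch(12) == 4: X * 12 == (X << 2) * 3, so the product's shadow
// is Sx * 4 and the two low result bits are always marked initialized.
// shadowMulSketch(0) == 0: X * 0 is fully defined, so the shadow vanishes,
// which is why multiplication rather than a shift is used above.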
1825
1826 void visitMul(BinaryOperator &I) {
1827 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1828 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1829 if (constOp0 && !constOp1)
1830 handleMulByConstant(I, constOp0, I.getOperand(1));
1831 else if (constOp1 && !constOp0)
1832 handleMulByConstant(I, constOp1, I.getOperand(0));
1833 else
1834 handleShadowOr(I);
1835 }
1836
1837 void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
1838 void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
1839 void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
1840 void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
1841 void visitSub(BinaryOperator &I) { handleShadowOr(I); }
1842 void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1843
1844 void handleDiv(Instruction &I) {
1845 IRBuilder<> IRB(&I);
1846 // Strict on the second argument.
1847 insertShadowCheck(I.getOperand(1), &I);
1848 setShadow(&I, getShadow(&I, 0));
1849 setOrigin(&I, getOrigin(&I, 0));
1850 }
1851
1852 void visitUDiv(BinaryOperator &I) { handleDiv(I); }
1853 void visitSDiv(BinaryOperator &I) { handleDiv(I); }
1854 void visitFDiv(BinaryOperator &I) { handleDiv(I); }
1855 void visitURem(BinaryOperator &I) { handleDiv(I); }
1856 void visitSRem(BinaryOperator &I) { handleDiv(I); }
1857 void visitFRem(BinaryOperator &I) { handleDiv(I); }
1858
1859 /// \brief Instrument == and != comparisons.
1860 ///
1861 /// Sometimes the comparison result is known even if some of the bits of the
1862 /// arguments are not.
1863 void handleEqualityComparison(ICmpInst &I) {
1864 IRBuilder<> IRB(&I);
1865 Value *A = I.getOperand(0);
1866 Value *B = I.getOperand(1);
1867 Value *Sa = getShadow(A);
1868 Value *Sb = getShadow(B);
1869
1870 // Get rid of pointers and vectors of pointers.
1871 // For ints (and vectors of ints), types of A and Sa match,
1872 // and this is a no-op.
1873 A = IRB.CreatePointerCast(A, Sa->getType());
1874 B = IRB.CreatePointerCast(B, Sb->getType());
1875
1876 // A == B <==> (C = A^B) == 0
1877 // A != B <==> (C = A^B) != 0
1878 // Sc = Sa | Sb
1879 Value *C = IRB.CreateXor(A, B);
1880 Value *Sc = IRB.CreateOr(Sa, Sb);
1881 // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
1882 // Result is defined if one of the following is true
1883 // * there is a defined 1 bit in C
1884 // * C is fully defined
1885 // Si = !(C & ~Sc) && Sc
1886 Value *Zero = Constant::getNullValue(Sc->getType());
1887 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1888 Value *Si =
1889 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1890 IRB.CreateICmpEQ(
1891 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1892 Si->setName("_msprop_icmp");
1893 setShadow(&I, Si);
1894 setOriginForNaryOp(I);
1895 }
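// [Editor's note] A standalone sketch (not part of the original source) of
// the definedness rule above on small unsigned values: the result of
// (A == B) is uninitialized iff some bit of C = A^B is undefined and no
// defined bit of C is 1.
static bool eqCmpShadowSketch(unsigned C, unsigned Sc) {
  // Si = (Sc != 0) && ((C & ~Sc) == 0); true means the comparison result
  // is uninitialized.
  return (Sc != 0) && ((C & ~Sc) == 0);
}
// With A = 0b0001, B = 0b0000 and only B's top bit unknown (Sc = 0b1000),
// C = 0b1001 has a defined 1 in its low bit, so the values differ no matter
// what the unknown bit holds: eqCmpShadowSketch(0x9, 0x8) == false (defined).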
1896
1897 /// \brief Build the lowest possible value of V, taking into account V's
1898 /// uninitialized bits.
1899 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1900 bool isSigned) {
1901 if (isSigned) {
1902 // Split shadow into sign bit and other bits.
1903 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1904 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1905 // Maximize the undefined shadow bit, minimize other undefined bits.
1906 return
1907 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1908 } else {
1909 // Minimize undefined bits.
1910 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1911 }
1912 }
1913
1914 /// \brief Build the highest possible value of V, taking into account V's
1915 /// uninitialized bits.
1916 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1917 bool isSigned) {
1918 if (isSigned) {
1919 // Split shadow into sign bit and other bits.
1920 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1921 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1922 // Minimize the undefined shadow bit, maximize other undefined bits.
1923 return
1924 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1925 } else {
1926 // Maximize undefined bits.
1927 return IRB.CreateOr(A, Sa);
1928 }
1929 }
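// [Editor's note] A standalone sketch (not part of the original source) of
// the unsigned bounds above: with value bits A and shadow Sa, the smallest
// possible value clears every undefined bit and the largest sets them all.
static unsigned lowestUnsignedSketch(unsigned A, unsigned Sa) {
  return A & ~Sa;  // minimize undefined bits
}
static unsigned highestUnsignedSketch(unsigned A, unsigned Sa) {
  return A | Sa;   // maximize undefined bits
}
// For A = 0b1011 with Sa = 0b0010 (bit 1 undefined), the value ranges over
// [0b1001, 0b1011]: lowestUnsignedSketch(0xB, 0x2) == 0x9 and
// highestUnsignedSketch(0xB, 0x2) == 0xB.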
1930
1931 /// \brief Instrument relational comparisons.
1932 ///
1933 /// This function does exact shadow propagation for all relational
1934 /// comparisons of integers, pointers and vectors of those.
1935 /// FIXME: output seems suboptimal when one of the operands is a constant
1936 void handleRelationalComparisonExact(ICmpInst &I) {
1937 IRBuilder<> IRB(&I);
1938 Value *A = I.getOperand(0);
1939 Value *B = I.getOperand(1);
1940 Value *Sa = getShadow(A);
1941 Value *Sb = getShadow(B);
1942
1943 // Get rid of pointers and vectors of pointers.
1944 // For ints (and vectors of ints), types of A and Sa match,
1945 // and this is a no-op.
1946 A = IRB.CreatePointerCast(A, Sa->getType());
1947 B = IRB.CreatePointerCast(B, Sb->getType());
1948
1949 // Let [a0, a1] be the interval of possible values of A, taking into account
1950 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1951 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
1952 bool IsSigned = I.isSigned();
1953 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1954 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1955 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1956 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1957 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1958 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1959 Value *Si = IRB.CreateXor(S1, S2);
1960 setShadow(&I, Si);
1961 setOriginForNaryOp(I);
1962 }
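// [Editor's note] Continuing the sketch above (not part of the original
// source): an unsigned comparison is defined iff it comes out the same at
// both ends of the operands' intervals, which is the xor computed above.
static bool ultShadowSketch(unsigned A, unsigned Sa, unsigned B, unsigned Sb) {
  bool S1 = (A & ~Sa) < (B | Sb);   // a0 cmp b1
  bool S2 = (A | Sa) < (B & ~Sb);   // a1 cmp b0
  return S1 != S2;                  // true means the result is uninitialized
}
// ultShadowSketch(0x4, 0x3, 0x8, 0x0): A ranges over [0x4, 0x7] and B is
// exactly 0x8, so A < B holds at both interval ends and the comparison
// result is fully defined (the sketch returns false).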
1963
1964 /// \brief Instrument signed relational comparisons.
1965 ///
1966 /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
1967 /// bit of the shadow. Everything else is delegated to handleShadowOr().
1968 void handleSignedRelationalComparison(ICmpInst &I) {
1969 Constant *constOp;
1970 Value *op = nullptr;
1971 CmpInst::Predicate pre;
1972 if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
1973 op = I.getOperand(0);
1974 pre = I.getPredicate();
1975 } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
1976 op = I.getOperand(1);
1977 pre = I.getSwappedPredicate();
1978 } else {
1979 handleShadowOr(I);
1980 return;
1981 }
1982
1983 if ((constOp->isNullValue() &&
1984 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
1985 (constOp->isAllOnesValue() &&
1986 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
1987 IRBuilder<> IRB(&I);
1988 Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
1989 "_msprop_icmp_s");
1990 setShadow(&I, Shadow);
1991 setOrigin(&I, getOrigin(op));
1992 } else {
1993 handleShadowOr(I);
1994 }
1995 }
1996
1997 void visitICmpInst(ICmpInst &I) {
1998 if (!ClHandleICmp) {
1999 handleShadowOr(I);
2000 return;
2001 }
2002 if (I.isEquality()) {
2003 handleEqualityComparison(I);
2004 return;
2005 }
2006
2007 assert(I.isRelational());
2008 if (ClHandleICmpExact) {
2009 handleRelationalComparisonExact(I);
2010 return;
2011 }
2012 if (I.isSigned()) {
2013 handleSignedRelationalComparison(I);
2014 return;
2015 }
2016
2017 assert(I.isUnsigned());
2018 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2019 handleRelationalComparisonExact(I);
2020 return;
2021 }
2022
2023 handleShadowOr(I);
2024 }
2025
2026 void visitFCmpInst(FCmpInst &I) {
2027 handleShadowOr(I);
2028 }
2029
2030 void handleShift(BinaryOperator &I) {
2031 IRBuilder<> IRB(&I);
2032 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2033 // Otherwise perform the same shift on S1.
2034 Value *S1 = getShadow(&I, 0);
2035 Value *S2 = getShadow(&I, 1);
2036 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
2037 S2->getType());
2038 Value *V2 = I.getOperand(1);
2039 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2040 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2041 setOriginForNaryOp(I);
2042 }
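// [Editor's note] A standalone sketch (not part of the original source) of
// the shift rule above on one byte: if the shift amount is at all
// uninitialized the whole result is poisoned; otherwise the value's shadow
// is shifted by the same, fully defined, amount.
static unsigned shlShadowSketch(unsigned S1, unsigned V2, unsigned S2) {
  unsigned S2Conv = S2 ? 0xFFu : 0x00u;  // sext(S2 != 0) over one byte
  return ((S1 << V2) & 0xFFu) | S2Conv;
}
// shlShadowSketch(0x01, /*V2=*/4, /*S2=*/0) == 0x10: only the shifted source
// bit stays undefined. With any bit of S2 set the result is 0xFF, fully
// poisoned.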
2043
2044 void visitShl(BinaryOperator &I) { handleShift(I); }
2045 void visitAShr(BinaryOperator &I) { handleShift(I); }
2046 void visitLShr(BinaryOperator &I) { handleShift(I); }
2047
2048 /// \brief Instrument llvm.memmove
2049 ///
2050 /// At this point we don't know if llvm.memmove will be inlined or not.
2051 /// If we don't instrument it and it gets inlined,
2052 /// our interceptor will not kick in and we will lose the memmove.
2053 /// If we instrument the call here, but it does not get inlined,
2054 /// we will memmove the shadow twice, which is bad in the case
2055 /// of overlapping regions. So, we simply lower the intrinsic to a call.
2056 ///
2057 /// Similar situation exists for memcpy and memset.
2058 void visitMemMoveInst(MemMoveInst &I) {
2059 IRBuilder<> IRB(&I);
2060 IRB.CreateCall(
2061 MS.MemmoveFn,
2062 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2063 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2064 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2065 I.eraseFromParent();
2066 }
2067
2068 // Similar to memmove: avoid copying shadow twice.
2069 // This is somewhat unfortunate as it may slow down small constant memcpys.
2070 // FIXME: consider doing manual inline for small constant sizes and proper
2071 // alignment.
2072 void visitMemCpyInst(MemCpyInst &I) {
2073 IRBuilder<> IRB(&I);
2074 IRB.CreateCall(
2075 MS.MemcpyFn,
2076 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2077 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2078 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2079 I.eraseFromParent();
2080 }
2081
2082 // Same as memcpy.
2083 void visitMemSetInst(MemSetInst &I) {
2084 IRBuilder<> IRB(&I);
2085 IRB.CreateCall(
2086 MS.MemsetFn,
2087 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2088 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2089 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2090 I.eraseFromParent();
2091 }
2092
2093 void visitVAStartInst(VAStartInst &I) {
2094 VAHelper->visitVAStartInst(I);
2095 }
2096
2097 void visitVACopyInst(VACopyInst &I) {
2098 VAHelper->visitVACopyInst(I);
2099 }
2100
2101 /// \brief Handle vector store-like intrinsics.
2102 ///
2103 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2104 /// has 1 pointer argument and 1 vector argument, returns void.
2105 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2106 IRBuilder<> IRB(&I);
2107 Value* Addr = I.getArgOperand(0);
2108 Value *Shadow = getShadow(&I, 1);
2109 Value *ShadowPtr, *OriginPtr;
2110
2111 // We don't know the pointer alignment (could be unaligned SSE store!).
2112 // Have to assume the worst case.
2113 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2114 Addr, IRB, Shadow->getType(), /*Alignment*/ 1, /*isStore*/ true);
2115 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
2116
2117 if (ClCheckAccessAddress)
2118 insertShadowCheck(Addr, &I);
2119
2120 // FIXME: factor out common code from materializeStores
2121 if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2122 return true;
2123 }
2124
2125 /// \brief Handle vector load-like intrinsics.
2126 ///
2127 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2128 /// has 1 pointer argument, returns a vector.
2129 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2130 IRBuilder<> IRB(&I);
2131 Value *Addr = I.getArgOperand(0);
2132
2133 Type *ShadowTy = getShadowTy(&I);
2134 Value *ShadowPtr, *OriginPtr;
2135 if (PropagateShadow) {
2136 // We don't know the pointer alignment (could be unaligned SSE load!).
2137 // Have to assume the worst case.
2138 unsigned Alignment = 1;
2139 std::tie(ShadowPtr, OriginPtr) =
2140 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2141 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
2142 } else {
2143 setShadow(&I, getCleanShadow(&I));
2144 }
2145
2146 if (ClCheckAccessAddress)
2147 insertShadowCheck(Addr, &I);
2148
2149 if (MS.TrackOrigins) {
2150 if (PropagateShadow)
2151 setOrigin(&I, IRB.CreateLoad(OriginPtr));
2152 else
2153 setOrigin(&I, getCleanOrigin());
2154 }
2155 return true;
2156 }
2157
2158 /// \brief Handle (SIMD arithmetic)-like intrinsics.
2159 ///
2160 /// Instrument intrinsics with any number of arguments of the same type,
2161 /// equal to the return type. The type should be simple (no aggregates or
2162 /// pointers; vectors are fine).
2163 /// Caller guarantees that this intrinsic does not access memory.
2164 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2165 Type *RetTy = I.getType();
2166 if (!(RetTy->isIntOrIntVectorTy() ||
2167 RetTy->isFPOrFPVectorTy() ||
2168 RetTy->isX86_MMXTy()))
2169 return false;
2170
2171 unsigned NumArgOperands = I.getNumArgOperands();
2172
2173 for (unsigned i = 0; i < NumArgOperands; ++i) {
2174 Type *Ty = I.getArgOperand(i)->getType();
2175 if (Ty != RetTy)
2176 return false;
2177 }
2178
2179 IRBuilder<> IRB(&I);
2180 ShadowAndOriginCombiner SC(this, IRB);
2181 for (unsigned i = 0; i < NumArgOperands; ++i)
2182 SC.Add(I.getArgOperand(i));
2183 SC.Done(&I);
2184
2185 return true;
2186 }
2187
2188 /// \brief Heuristically instrument unknown intrinsics.
2189 ///
2190 /// The main purpose of this code is to do something reasonable with all
2191 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2192 /// We recognize several classes of intrinsics by their argument types and
2193 /// ModRefBehavior and apply special instrumentation when we are reasonably
2194 /// sure that we know what the intrinsic does.
2195 ///
2196 /// We special-case intrinsics where this approach fails. See llvm.bswap
2197 /// handling as an example of that.
2198 bool handleUnknownIntrinsic(IntrinsicInst &I) {
2199 unsigned NumArgOperands = I.getNumArgOperands();
2200 if (NumArgOperands == 0)
2201 return false;
2202
2203 if (NumArgOperands == 2 &&
2204 I.getArgOperand(0)->getType()->isPointerTy() &&
2205 I.getArgOperand(1)->getType()->isVectorTy() &&
2206 I.getType()->isVoidTy() &&
2207 !I.onlyReadsMemory()) {
2208 // This looks like a vector store.
2209 return handleVectorStoreIntrinsic(I);
2210 }
2211
2212 if (NumArgOperands == 1 &&
2213 I.getArgOperand(0)->getType()->isPointerTy() &&
2214 I.getType()->isVectorTy() &&
2215 I.onlyReadsMemory()) {
2216 // This looks like a vector load.
2217 return handleVectorLoadIntrinsic(I);
2218 }
2219
2220 if (I.doesNotAccessMemory())
2221 if (maybeHandleSimpleNomemIntrinsic(I))
2222 return true;
2223
2224 // FIXME: detect and handle SSE maskstore/maskload
2225 return false;
2226 }
2227
2228 void handleBswap(IntrinsicInst &I) {
2229 IRBuilder<> IRB(&I);
2230 Value *Op = I.getArgOperand(0);
2231 Type *OpType = Op->getType();
2232 Function *BswapFunc = Intrinsic::getDeclaration(
2233 F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2234 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2235 setOrigin(&I, getOrigin(Op));
2236 }
2237
2238 // \brief Instrument vector convert intrinsic.
2239 //
2240 // This function instruments intrinsics like cvtsi2ss:
2241 // %Out = int_xxx_cvtyyy(%ConvertOp)
2242 // or
2243 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2244 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2245 // number of \p Out elements, and (if it has 2 arguments) copies the rest of
2246 // the elements from \p CopyOp.
2247 // In most cases conversion involves a floating-point value, which may trigger
2248 // a hardware exception when not fully initialized. For this reason we require
2249 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2250 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2251 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2252 // return a fully initialized value.
2253 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2254 IRBuilder<> IRB(&I);
2255 Value *CopyOp, *ConvertOp;
2256
2257 switch (I.getNumArgOperands()) {
2258 case 3:
2259 assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2260 LLVM_FALLTHROUGH;
2261 case 2:
2262 CopyOp = I.getArgOperand(0);
2263 ConvertOp = I.getArgOperand(1);
2264 break;
2265 case 1:
2266 ConvertOp = I.getArgOperand(0);
2267 CopyOp = nullptr;
2268 break;
2269 default:
2270 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.")::llvm::llvm_unreachable_internal("Cvt intrinsic with unsupported number of arguments."
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Instrumentation/MemorySanitizer.cpp"
, 2270)
;
2271 }
2272
2273 // The first *NumUsedElements* elements of ConvertOp are converted to the
2274 // same number of output elements. The rest of the output is copied from
2275 // CopyOp, or (if not available) filled with zeroes.
2276 // Combine shadow for elements of ConvertOp that are used in this operation,
2277 // and insert a check.
2278 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2279 // int->any conversion.
2280 Value *ConvertShadow = getShadow(ConvertOp);
2281 Value *AggShadow = nullptr;
2282 if (ConvertOp->getType()->isVectorTy()) {
2283 AggShadow = IRB.CreateExtractElement(
2284 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2285 for (int i = 1; i < NumUsedElements; ++i) {
2286 Value *MoreShadow = IRB.CreateExtractElement(
2287 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2288 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2289 }
2290 } else {
2291 AggShadow = ConvertShadow;
2292 }
2293 assert(AggShadow->getType()->isIntegerTy());
2294 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2295
2296 // Build result shadow by zero-filling parts of CopyOp shadow that come from
2297 // ConvertOp.
2298 if (CopyOp) {
2299 assert(CopyOp->getType() == I.getType());
2300 assert(CopyOp->getType()->isVectorTy());
2301 Value *ResultShadow = getShadow(CopyOp);
2302 Type *EltTy = ResultShadow->getType()->getVectorElementType();
2303 for (int i = 0; i < NumUsedElements; ++i) {
2304 ResultShadow = IRB.CreateInsertElement(
2305 ResultShadow, ConstantInt::getNullValue(EltTy),
2306 ConstantInt::get(IRB.getInt32Ty(), i));
2307 }
2308 setShadow(&I, ResultShadow);
2309 setOrigin(&I, getOrigin(CopyOp));
2310 } else {
2311 setShadow(&I, getCleanShadow(&I));
2312 setOrigin(&I, getCleanOrigin());
2313 }
2314 }
2315
2316 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2317 // zeroes if it is zero, and all ones otherwise.
2318 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2319 if (S->getType()->isVectorTy())
2320 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2321 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2322 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2323 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2324 }
2325
2326 // Given a vector, extract its first element, and return all
2327 // zeroes if it is zero, and all ones otherwise.
2328 Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2329 Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2330 Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2331 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2332 }
2333
2334 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2335 Type *T = S->getType();
2336 assert(T->isVectorTy());
2337 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2338 return IRB.CreateSExt(S2, T);
2339 }
2340
2341 // \brief Instrument vector shift intrinsic.
2342 //
2343 // This function instruments intrinsics like int_x86_avx2_psll_w.
2344 // Intrinsic shifts %In by %ShiftSize bits.
2345 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2346 // size, and the rest is ignored. Behavior is defined even if shift size is
2347 // greater than register (or field) width.
2348 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2349 assert(I.getNumArgOperands() == 2);
2350 IRBuilder<> IRB(&I);
2351 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2352 // Otherwise perform the same shift on S1.
2353 Value *S1 = getShadow(&I, 0);
2354 Value *S2 = getShadow(&I, 1);
2355 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2356 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2357 Value *V1 = I.getOperand(0);
2358 Value *V2 = I.getOperand(1);
2359 Value *Shift = IRB.CreateCall(I.getCalledValue(),
2360 {IRB.CreateBitCast(S1, V1->getType()), V2});
2361 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2362 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2363 setOriginForNaryOp(I);
2364 }
2365
2366 // \brief Get an X86_MMX-sized vector type.
2367 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2368 const unsigned X86_MMXSizeInBits = 64;
2369 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2370 X86_MMXSizeInBits / EltSizeInBits);
6. Division by zero
2371 }
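// [Editor's note] The division above (line 2370) is the one this report
// flags: it executes with EltSizeInBits == 0 on the path the analyzer
// reports (the numbered steps in this listing). A hedged sketch of a guard,
// not the author's fix (mmxNumEltsSketch is a hypothetical name; <cassert>
// assumed):
static unsigned mmxNumEltsSketch(unsigned EltSizeInBits) {
  const unsigned X86_MMXSizeInBits = 64;
  // Reject a zero element size up front instead of dividing by it.
  assert(EltSizeInBits != 0 && "MMX element size must be nonzero");
  return X86_MMXSizeInBits / EltSizeInBits;  // e.g. 64 / 16 == 4 elements
}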
2372
2373 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2374 // intrinsic.
2375 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2376 switch (id) {
2377 case Intrinsic::x86_sse2_packsswb_128:
2378 case Intrinsic::x86_sse2_packuswb_128:
2379 return Intrinsic::x86_sse2_packsswb_128;
2380
2381 case Intrinsic::x86_sse2_packssdw_128:
2382 case Intrinsic::x86_sse41_packusdw:
2383 return Intrinsic::x86_sse2_packssdw_128;
2384
2385 case Intrinsic::x86_avx2_packsswb:
2386 case Intrinsic::x86_avx2_packuswb:
2387 return Intrinsic::x86_avx2_packsswb;
2388
2389 case Intrinsic::x86_avx2_packssdw:
2390 case Intrinsic::x86_avx2_packusdw:
2391 return Intrinsic::x86_avx2_packssdw;
2392
2393 case Intrinsic::x86_mmx_packsswb:
2394 case Intrinsic::x86_mmx_packuswb:
2395 return Intrinsic::x86_mmx_packsswb;
2396
2397 case Intrinsic::x86_mmx_packssdw:
2398 return Intrinsic::x86_mmx_packssdw;
2399 default:
2400 llvm_unreachable("unexpected intrinsic id")::llvm::llvm_unreachable_internal("unexpected intrinsic id", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Instrumentation/MemorySanitizer.cpp"
, 2400)
;
2401 }
2402 }
2403
2404 // \brief Instrument vector pack intrinsic.
2405 //
2406 // This function instruments intrinsics like x86_mmx_packsswb, that
2407 // packs elements of 2 input vectors into half as many bits with saturation.
2408 // Shadow is propagated with the signed variant of the same intrinsic applied
2409 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2410 // EltSizeInBits is used only for x86mmx arguments.
2411 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2412 assert(I.getNumArgOperands() == 2);
2413 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2414 IRBuilder<> IRB(&I);
2415 Value *S1 = getShadow(&I, 0);
2416 Value *S2 = getShadow(&I, 1);
2417 assert(isX86_MMX || S1->getType()->isVectorTy());
2418
2419 // SExt and ICmpNE below must apply to individual elements of input vectors.
2420 // In case of x86mmx arguments, cast them to appropriate vector types and
2421 // back.
2422 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2423 if (isX86_MMX) {
2424 S1 = IRB.CreateBitCast(S1, T);
2425 S2 = IRB.CreateBitCast(S2, T);
2426 }
2427 Value *S1_ext = IRB.CreateSExt(
2428 IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
2429 Value *S2_ext = IRB.CreateSExt(
2430 IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
2431 if (isX86_MMX) {
2432 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2433 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2434 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2435 }
2436
2437 Function *ShadowFn = Intrinsic::getDeclaration(
2438 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2439
2440 Value *S =
2441 IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2442 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2443 setShadow(&I, S);
2444 setOriginForNaryOp(I);
2445 }
2446
2447 // \brief Instrument sum-of-absolute-differences intrinsic.
2448 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2449 const unsigned SignificantBitsPerResultElement = 16;
2450 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2451 Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2452 unsigned ZeroBitsPerResultElement =
2453 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2454
2455 IRBuilder<> IRB(&I);
2456 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2457 S = IRB.CreateBitCast(S, ResTy);
2458 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2459 ResTy);
2460 S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2461 S = IRB.CreateBitCast(S, getShadowTy(&I));
2462 setShadow(&I, S);
2463 setOriginForNaryOp(I);
2464 }
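// [Editor's note] A worked example (not part of the original source) of the
// shift width above. A psadbw-style sum of 8 absolute byte differences is at
// most 8 * 255 == 2040, which fits in 16 bits, so for 64-bit result elements
// ZeroBitsPerResultElement == 64 - 16 == 48 and the LShr by 48 marks the 48
// always-zero high bits of each element as initialized:
static unsigned sadZeroBitsSketch(unsigned ScalarSizeInBits) {
  const unsigned SignificantBitsPerResultElement = 16;
  return ScalarSizeInBits - SignificantBitsPerResultElement;  // 64 -> 48
}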
2465
2466 // \brief Instrument multiply-add intrinsic.
2467 void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2468 unsigned EltSizeInBits = 0) {
2469 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2470 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
3. '?' condition is true
4. Passing the value 0 via 1st parameter 'EltSizeInBits'
5. Calling 'MemorySanitizerVisitor::getMMXVectorTy'
2471 IRBuilder<> IRB(&I);
2472 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2473 S = IRB.CreateBitCast(S, ResTy);
2474 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2475 ResTy);
2476 S = IRB.CreateBitCast(S, getShadowTy(&I));
2477 setShadow(&I, S);
2478 setOriginForNaryOp(I);
2479 }
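// [Editor's note] This handler is where the reported path plays out: step 1
// jumps to 'case x86_avx2_pmadd_ub_sw:' at line 2706, which reaches here with
// the default EltSizeInBits == 0; the analyzer then assumes the isX86_MMX
// test above is true (step 3), so getMMXVectorTy receives 0 * 2 == 0
// (steps 4-5) and divides 64 by it (step 6). For a genuine AVX2 intrinsic
// the operand type should be a vector rather than x86_mmx, so the path may
// be infeasible in practice, but nothing in this function enforces that
// invariant; a guard like the hypothetical mmxNumEltsSketch() above, or an
// explicit width at the call site, e.g.
// handleVectorPmaddIntrinsic(I, /*EltSizeInBits=*/8) (hypothetical value),
// would make it checkable.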
2480
2481 // \brief Instrument compare-packed intrinsic.
2482 // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
2483 // all-ones shadow.
2484 void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2485 IRBuilder<> IRB(&I);
2486 Type *ResTy = getShadowTy(&I);
2487 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2488 Value *S = IRB.CreateSExt(
2489 IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2490 setShadow(&I, S);
2491 setOriginForNaryOp(I);
2492 }
2493
2494 // \brief Instrument compare-scalar intrinsic.
2495 // This handles both cmp* intrinsics which return the result in the first
2496 // element of a vector, and comi* which return the result as i32.
2497 void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2498 IRBuilder<> IRB(&I);
2499 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2500 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2501 setShadow(&I, S);
2502 setOriginForNaryOp(I);
2503 }
2504
2505 void handleStmxcsr(IntrinsicInst &I) {
2506 IRBuilder<> IRB(&I);
2507 Value* Addr = I.getArgOperand(0);
2508 Type *Ty = IRB.getInt32Ty();
2509 Value *ShadowPtr =
2510 getShadowOriginPtr(Addr, IRB, Ty, /*Alignment*/ 1, /*isStore*/ true)
2511 .first;
2512
2513 IRB.CreateStore(getCleanShadow(Ty),
2514 IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
2515
2516 if (ClCheckAccessAddress)
2517 insertShadowCheck(Addr, &I);
2518 }
2519
2520 void handleLdmxcsr(IntrinsicInst &I) {
2521 if (!InsertChecks) return;
2522
2523 IRBuilder<> IRB(&I);
2524 Value *Addr = I.getArgOperand(0);
2525 Type *Ty = IRB.getInt32Ty();
2526 unsigned Alignment = 1;
2527 Value *ShadowPtr, *OriginPtr;
2528 std::tie(ShadowPtr, OriginPtr) =
2529 getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
2530
2531 if (ClCheckAccessAddress)
2532 insertShadowCheck(Addr, &I);
2533
2534 Value *Shadow = IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_ldmxcsr");
2535 Value *Origin =
2536 MS.TrackOrigins ? IRB.CreateLoad(OriginPtr) : getCleanOrigin();
2537 insertShadowCheck(Shadow, Origin, &I);
2538 }
2539
2540 void visitIntrinsicInst(IntrinsicInst &I) {
2541 switch (I.getIntrinsicID()) {
1. Control jumps to 'case x86_avx2_pmadd_ub_sw:' at line 2706
2542 case Intrinsic::bswap:
2543 handleBswap(I);
2544 break;
2545 case Intrinsic::x86_sse_stmxcsr:
2546 handleStmxcsr(I);
2547 break;
2548 case Intrinsic::x86_sse_ldmxcsr:
2549 handleLdmxcsr(I);
2550 break;
2551 case Intrinsic::x86_avx512_vcvtsd2usi64:
2552 case Intrinsic::x86_avx512_vcvtsd2usi32:
2553 case Intrinsic::x86_avx512_vcvtss2usi64:
2554 case Intrinsic::x86_avx512_vcvtss2usi32:
2555 case Intrinsic::x86_avx512_cvttss2usi64:
2556 case Intrinsic::x86_avx512_cvttss2usi:
2557 case Intrinsic::x86_avx512_cvttsd2usi64:
2558 case Intrinsic::x86_avx512_cvttsd2usi:
2559 case Intrinsic::x86_avx512_cvtusi2sd:
2560 case Intrinsic::x86_avx512_cvtusi2ss:
2561 case Intrinsic::x86_avx512_cvtusi642sd:
2562 case Intrinsic::x86_avx512_cvtusi642ss:
2563 case Intrinsic::x86_sse2_cvtsd2si64:
2564 case Intrinsic::x86_sse2_cvtsd2si:
2565 case Intrinsic::x86_sse2_cvtsd2ss:
2566 case Intrinsic::x86_sse2_cvtsi2sd:
2567 case Intrinsic::x86_sse2_cvtsi642sd:
2568 case Intrinsic::x86_sse2_cvtss2sd:
2569 case Intrinsic::x86_sse2_cvttsd2si64:
2570 case Intrinsic::x86_sse2_cvttsd2si:
2571 case Intrinsic::x86_sse_cvtsi2ss:
2572 case Intrinsic::x86_sse_cvtsi642ss:
2573 case Intrinsic::x86_sse_cvtss2si64:
2574 case Intrinsic::x86_sse_cvtss2si:
2575 case Intrinsic::x86_sse_cvttss2si64:
2576 case Intrinsic::x86_sse_cvttss2si:
2577 handleVectorConvertIntrinsic(I, 1);
2578 break;
2579 case Intrinsic::x86_sse_cvtps2pi:
2580 case Intrinsic::x86_sse_cvttps2pi:
2581 handleVectorConvertIntrinsic(I, 2);
2582 break;
2583
2584 case Intrinsic::x86_avx512_psll_w_512:
2585 case Intrinsic::x86_avx512_psll_d_512:
2586 case Intrinsic::x86_avx512_psll_q_512:
2587 case Intrinsic::x86_avx512_pslli_w_512:
2588 case Intrinsic::x86_avx512_pslli_d_512:
2589 case Intrinsic::x86_avx512_pslli_q_512:
2590 case Intrinsic::x86_avx512_psrl_w_512:
2591 case Intrinsic::x86_avx512_psrl_d_512:
2592 case Intrinsic::x86_avx512_psrl_q_512:
2593 case Intrinsic::x86_avx512_psra_w_512:
2594 case Intrinsic::x86_avx512_psra_d_512:
2595 case Intrinsic::x86_avx512_psra_q_512:
2596 case Intrinsic::x86_avx512_psrli_w_512:
2597 case Intrinsic::x86_avx512_psrli_d_512:
2598 case Intrinsic::x86_avx512_psrli_q_512:
2599 case Intrinsic::x86_avx512_psrai_w_512:
2600 case Intrinsic::x86_avx512_psrai_d_512:
2601 case Intrinsic::x86_avx512_psrai_q_512:
2602 case Intrinsic::x86_avx512_psra_q_256:
2603 case Intrinsic::x86_avx512_psra_q_128:
2604 case Intrinsic::x86_avx512_psrai_q_256:
2605 case Intrinsic::x86_avx512_psrai_q_128:
2606 case Intrinsic::x86_avx2_psll_w:
2607 case Intrinsic::x86_avx2_psll_d:
2608 case Intrinsic::x86_avx2_psll_q:
2609 case Intrinsic::x86_avx2_pslli_w:
2610 case Intrinsic::x86_avx2_pslli_d:
2611 case Intrinsic::x86_avx2_pslli_q:
2612 case Intrinsic::x86_avx2_psrl_w:
2613 case Intrinsic::x86_avx2_psrl_d:
2614 case Intrinsic::x86_avx2_psrl_q:
2615 case Intrinsic::x86_avx2_psra_w:
2616 case Intrinsic::x86_avx2_psra_d:
2617 case Intrinsic::x86_avx2_psrli_w:
2618 case Intrinsic::x86_avx2_psrli_d:
2619 case Intrinsic::x86_avx2_psrli_q:
2620 case Intrinsic::x86_avx2_psrai_w:
2621 case Intrinsic::x86_avx2_psrai_d:
2622 case Intrinsic::x86_sse2_psll_w:
2623 case Intrinsic::x86_sse2_psll_d:
2624 case Intrinsic::x86_sse2_psll_q:
2625 case Intrinsic::x86_sse2_pslli_w:
2626 case Intrinsic::x86_sse2_pslli_d:
2627 case Intrinsic::x86_sse2_pslli_q:
2628 case Intrinsic::x86_sse2_psrl_w:
2629 case Intrinsic::x86_sse2_psrl_d:
2630 case Intrinsic::x86_sse2_psrl_q:
2631 case Intrinsic::x86_sse2_psra_w:
2632 case Intrinsic::x86_sse2_psra_d:
2633 case Intrinsic::x86_sse2_psrli_w:
2634 case Intrinsic::x86_sse2_psrli_d:
2635 case Intrinsic::x86_sse2_psrli_q:
2636 case Intrinsic::x86_sse2_psrai_w:
2637 case Intrinsic::x86_sse2_psrai_d:
2638 case Intrinsic::x86_mmx_psll_w:
2639 case Intrinsic::x86_mmx_psll_d:
2640 case Intrinsic::x86_mmx_psll_q:
2641 case Intrinsic::x86_mmx_pslli_w:
2642 case Intrinsic::x86_mmx_pslli_d:
2643 case Intrinsic::x86_mmx_pslli_q:
2644 case Intrinsic::x86_mmx_psrl_w:
2645 case Intrinsic::x86_mmx_psrl_d:
2646 case Intrinsic::x86_mmx_psrl_q:
2647 case Intrinsic::x86_mmx_psra_w:
2648 case Intrinsic::x86_mmx_psra_d:
2649 case Intrinsic::x86_mmx_psrli_w:
2650 case Intrinsic::x86_mmx_psrli_d:
2651 case Intrinsic::x86_mmx_psrli_q:
2652 case Intrinsic::x86_mmx_psrai_w:
2653 case Intrinsic::x86_mmx_psrai_d:
2654 handleVectorShiftIntrinsic(I, /* Variable */ false);
2655 break;
2656 case Intrinsic::x86_avx2_psllv_d:
2657 case Intrinsic::x86_avx2_psllv_d_256:
2658 case Intrinsic::x86_avx512_psllv_d_512:
2659 case Intrinsic::x86_avx2_psllv_q:
2660 case Intrinsic::x86_avx2_psllv_q_256:
2661 case Intrinsic::x86_avx512_psllv_q_512:
2662 case Intrinsic::x86_avx2_psrlv_d:
2663 case Intrinsic::x86_avx2_psrlv_d_256:
2664 case Intrinsic::x86_avx512_psrlv_d_512:
2665 case Intrinsic::x86_avx2_psrlv_q:
2666 case Intrinsic::x86_avx2_psrlv_q_256:
2667 case Intrinsic::x86_avx512_psrlv_q_512:
2668 case Intrinsic::x86_avx2_psrav_d:
2669 case Intrinsic::x86_avx2_psrav_d_256:
2670 case Intrinsic::x86_avx512_psrav_d_512:
2671 case Intrinsic::x86_avx512_psrav_q_128:
2672 case Intrinsic::x86_avx512_psrav_q_256:
2673 case Intrinsic::x86_avx512_psrav_q_512:
2674 handleVectorShiftIntrinsic(I, /* Variable */ true);
2675 break;
2676
2677 case Intrinsic::x86_sse2_packsswb_128:
2678 case Intrinsic::x86_sse2_packssdw_128:
2679 case Intrinsic::x86_sse2_packuswb_128:
2680 case Intrinsic::x86_sse41_packusdw:
2681 case Intrinsic::x86_avx2_packsswb:
2682 case Intrinsic::x86_avx2_packssdw:
2683 case Intrinsic::x86_avx2_packuswb:
2684 case Intrinsic::x86_avx2_packusdw:
2685 handleVectorPackIntrinsic(I);
2686 break;
2687
2688 case Intrinsic::x86_mmx_packsswb:
2689 case Intrinsic::x86_mmx_packuswb:
2690 handleVectorPackIntrinsic(I, 16);
2691 break;
2692
2693 case Intrinsic::x86_mmx_packssdw:
2694 handleVectorPackIntrinsic(I, 32);
2695 break;
2696
2697 case Intrinsic::x86_mmx_psad_bw:
2698 case Intrinsic::x86_sse2_psad_bw:
2699 case Intrinsic::x86_avx2_psad_bw:
2700 handleVectorSadIntrinsic(I);
2701 break;
2702
2703 case Intrinsic::x86_sse2_pmadd_wd:
2704 case Intrinsic::x86_avx2_pmadd_wd:
2705 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
2706 case Intrinsic::x86_avx2_pmadd_ub_sw:
2707 handleVectorPmaddIntrinsic(I);
  2. Calling 'MemorySanitizerVisitor::handleVectorPmaddIntrinsic'
2708 break;
2709
2710 case Intrinsic::x86_ssse3_pmadd_ub_sw:
2711 handleVectorPmaddIntrinsic(I, 8);
2712 break;
2713
2714 case Intrinsic::x86_mmx_pmadd_wd:
2715 handleVectorPmaddIntrinsic(I, 16);
2716 break;
2717
2718 case Intrinsic::x86_sse_cmp_ss:
2719 case Intrinsic::x86_sse2_cmp_sd:
2720 case Intrinsic::x86_sse_comieq_ss:
2721 case Intrinsic::x86_sse_comilt_ss:
2722 case Intrinsic::x86_sse_comile_ss:
2723 case Intrinsic::x86_sse_comigt_ss:
2724 case Intrinsic::x86_sse_comige_ss:
2725 case Intrinsic::x86_sse_comineq_ss:
2726 case Intrinsic::x86_sse_ucomieq_ss:
2727 case Intrinsic::x86_sse_ucomilt_ss:
2728 case Intrinsic::x86_sse_ucomile_ss:
2729 case Intrinsic::x86_sse_ucomigt_ss:
2730 case Intrinsic::x86_sse_ucomige_ss:
2731 case Intrinsic::x86_sse_ucomineq_ss:
2732 case Intrinsic::x86_sse2_comieq_sd:
2733 case Intrinsic::x86_sse2_comilt_sd:
2734 case Intrinsic::x86_sse2_comile_sd:
2735 case Intrinsic::x86_sse2_comigt_sd:
2736 case Intrinsic::x86_sse2_comige_sd:
2737 case Intrinsic::x86_sse2_comineq_sd:
2738 case Intrinsic::x86_sse2_ucomieq_sd:
2739 case Intrinsic::x86_sse2_ucomilt_sd:
2740 case Intrinsic::x86_sse2_ucomile_sd:
2741 case Intrinsic::x86_sse2_ucomigt_sd:
2742 case Intrinsic::x86_sse2_ucomige_sd:
2743 case Intrinsic::x86_sse2_ucomineq_sd:
2744 handleVectorCompareScalarIntrinsic(I);
2745 break;
2746
2747 case Intrinsic::x86_sse_cmp_ps:
2748 case Intrinsic::x86_sse2_cmp_pd:
2749 // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
2750 // generates reasonably looking IR that fails in the backend with "Do not
2751 // know how to split the result of this operator!".
2752 handleVectorComparePackedIntrinsic(I);
2753 break;
2754
2755 default:
2756 if (!handleUnknownIntrinsic(I))
2757 visitInstruction(I);
2758 break;
2759 }
2760 }
2761
2762 void visitCallSite(CallSite CS) {
2763 Instruction &I = *CS.getInstruction();
2764 assert(!I.getMetadata("nosanitize"));
2765 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2766 if (CS.isCall()) {
2767 CallInst *Call = cast<CallInst>(&I);
2768
2769 // For inline asm, do the usual thing: check argument shadow and mark all
2770 // outputs as clean. Note that any side effects of the inline asm that are
2771 // not immediately visible in its constraints are not handled.
2772 if (Call->isInlineAsm()) {
2773 if (ClHandleAsmConservative)
2774 visitAsmInstruction(I);
2775 else
2776 visitInstruction(I);
2777 return;
2778 }
2779
2780 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2781
2782 // We are going to insert code that relies on the fact that the callee
2783 // will become a non-readonly function after it is instrumented by us. To
2784 // prevent this code from being optimized out, mark that function
2785 // non-readonly in advance.
2786 if (Function *Func = Call->getCalledFunction()) {
2787 // Clear out readonly/readnone attributes.
2788 AttrBuilder B;
2789 B.addAttribute(Attribute::ReadOnly)
2790 .addAttribute(Attribute::ReadNone);
2791 Func->removeAttributes(AttributeList::FunctionIndex, B);
2792 }
2793
2794 maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
2795 }
2796 IRBuilder<> IRB(&I);
2797
2798 unsigned ArgOffset = 0;
2799 DEBUG(dbgs() << "  CallSite: " << I << "\n");
2800 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2801 ArgIt != End; ++ArgIt) {
2802 Value *A = *ArgIt;
2803 unsigned i = ArgIt - CS.arg_begin();
2804 if (!A->getType()->isSized()) {
2805 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2806 continue;
2807 }
2808 unsigned Size = 0;
2809 Value *Store = nullptr;
2810 // Compute the Shadow for arg even if it is ByVal, because
2811 // in that case getShadow() will copy the actual arg shadow to
2812 // __msan_param_tls.
2813 Value *ArgShadow = getShadow(A);
2814 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2815 DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
2816 "  Shadow: " << *ArgShadow << "\n");
2817 bool ArgIsInitialized = false;
2818 const DataLayout &DL = F.getParent()->getDataLayout();
2819 if (CS.paramHasAttr(i, Attribute::ByVal)) {
2820 assert(A->getType()->isPointerTy() &&
2821 "ByVal argument is not a pointer!");
2822 Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
2823 if (ArgOffset + Size > kParamTLSSize) break;
2824 unsigned ParamAlignment = CS.getParamAlignment(i);
2825 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
2826 Value *AShadowPtr = getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
2827 Alignment, /*isStore*/ false)
2828 .first;
2829
2830 Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
2831 Alignment, Size);
2832 } else {
2833 Size = DL.getTypeAllocSize(A->getType());
2834 if (ArgOffset + Size > kParamTLSSize) break;
2835 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2836 kShadowTLSAlignment);
2837 Constant *Cst = dyn_cast<Constant>(ArgShadow);
2838 if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
2839 }
2840 if (MS.TrackOrigins && !ArgIsInitialized)
2841 IRB.CreateStore(getOrigin(A),
2842 getOriginPtrForArgument(A, IRB, ArgOffset));
2843 (void)Store;
2844 assert(Size != 0 && Store != nullptr);
2845 DEBUG(dbgs() << "  Param:" << *Store << "\n");
2846 ArgOffset += alignTo(Size, 8);
2847 }
2848 DEBUG(dbgs() << "  done with call args\n");
2849
2850 FunctionType *FT =
2851 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2852 if (FT->isVarArg()) {
2853 VAHelper->visitCallSite(CS, IRB);
2854 }
2855
2856 // Now, get the shadow for the RetVal.
2857 if (!I.getType()->isSized()) return;
2858 // Don't emit the epilogue for musttail call returns.
2859 if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
2860 IRBuilder<> IRBBefore(&I);
2861 // Until we have full dynamic coverage, make sure the retval shadow is 0.
2862 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2863 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2864 BasicBlock::iterator NextInsn;
2865 if (CS.isCall()) {
2866 NextInsn = ++I.getIterator();
2867 assert(NextInsn != I.getParent()->end());
2868 } else {
2869 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2870 if (!NormalDest->getSinglePredecessor()) {
2871 // FIXME: this case is tricky, so we are just conservative here.
2872 // Perhaps we need to split the edge between this BB and NormalDest,
2873 // but a naive attempt to use SplitEdge leads to a crash.
2874 setShadow(&I, getCleanShadow(&I));
2875 setOrigin(&I, getCleanOrigin());
2876 return;
2877 }
2878 // FIXME: NextInsn is likely in a basic block that has not been visited yet.
2879 // Anything inserted there will be instrumented by MSan later!
2880 NextInsn = NormalDest->getFirstInsertionPt();
2881 assert(NextInsn != NormalDest->end() &&
2882 "Could not find insertion point for retval shadow load");
2883 }
2884 IRBuilder<> IRBAfter(&*NextInsn);
2885 Value *RetvalShadow =
2886 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2887 kShadowTLSAlignment, "_msret");
2888 setShadow(&I, RetvalShadow);
2889 if (MS.TrackOrigins)
2890 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2891 }
2892
2893 bool isAMustTailRetVal(Value *RetVal) {
2894 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
2895 RetVal = I->getOperand(0);
2896 }
2897 if (auto *I = dyn_cast<CallInst>(RetVal)) {
2898 return I->isMustTailCall();
2899 }
2900 return false;
2901 }
2902
2903 void visitReturnInst(ReturnInst &I) {
2904 IRBuilder<> IRB(&I);
2905 Value *RetVal = I.getReturnValue();
2906 if (!RetVal) return;
2907 // Don't emit the epilogue for musttail call returns.
2908 if (isAMustTailRetVal(RetVal)) return;
2909 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2910 if (CheckReturnValue) {
2911 insertShadowCheck(RetVal, &I);
2912 Value *Shadow = getCleanShadow(RetVal);
2913 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2914 } else {
2915 Value *Shadow = getShadow(RetVal);
2916 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2917 if (MS.TrackOrigins)
2918 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2919 }
2920 }
2921
2922 void visitPHINode(PHINode &I) {
2923 IRBuilder<> IRB(&I);
2924 if (!PropagateShadow) {
2925 setShadow(&I, getCleanShadow(&I));
2926 setOrigin(&I, getCleanOrigin());
2927 return;
2928 }
2929
2930 ShadowPHINodes.push_back(&I);
2931 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2932 "_msphi_s"));
2933 if (MS.TrackOrigins)
2934 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2935 "_msphi_o"));
2936 }
2937
2938 void visitAllocaInst(AllocaInst &I) {
2939 setShadow(&I, getCleanShadow(&I));
2940 setOrigin(&I, getCleanOrigin());
2941 IRBuilder<> IRB(I.getNextNode());
2942 const DataLayout &DL = F.getParent()->getDataLayout();
2943 uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
2944 Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
2945 if (I.isArrayAllocation())
2946 Len = IRB.CreateMul(Len, I.getArraySize());
2947 if (PoisonStack && ClPoisonStackWithCall) {
2948 IRB.CreateCall(MS.MsanPoisonStackFn,
2949 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
2950 } else {
2951 Value *ShadowBase = getShadowOriginPtr(&I, IRB, IRB.getInt8Ty(),
2952 I.getAlignment(), /*isStore*/ true)
2953 .first;
2954
2955 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2956 IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlignment());
2957 }
2958
2959 if (PoisonStack && MS.TrackOrigins) {
2960 SmallString<2048> StackDescriptionStorage;
2961 raw_svector_ostream StackDescription(StackDescriptionStorage);
2962 // We create a string with a description of the stack allocation and
2963 // pass it into __msan_set_alloca_origin.
2964 // It will be printed by the run-time if stack-originated UMR is found.
2965 // The first 4 bytes of the string are set to '----' and will be replaced
2966 // by __msan_va_arg_overflow_size_tls at the first call.
2967 StackDescription << "----" << I.getName() << "@" << F.getName();
2968 Value *Descr =
2969 createPrivateNonConstGlobalForString(*F.getParent(),
2970 StackDescription.str());
2971
2972 IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
2973 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
2974 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2975 IRB.CreatePointerCast(&F, MS.IntptrTy)});
2976 }
2977 }
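For example (names hypothetical), an alloca %buf in function main() produces the descriptor below; per the comment above, the run-time rewrites the leading four bytes on first use:

    //   "----buf@main"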
2978
2979 void visitSelectInst(SelectInst& I) {
2980 IRBuilder<> IRB(&I);
2981 // a = select b, c, d
2982 Value *B = I.getCondition();
2983 Value *C = I.getTrueValue();
2984 Value *D = I.getFalseValue();
2985 Value *Sb = getShadow(B);
2986 Value *Sc = getShadow(C);
2987 Value *Sd = getShadow(D);
2988
2989 // Result shadow if condition shadow is 0.
2990 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2991 Value *Sa1;
2992 if (I.getType()->isAggregateType()) {
2993 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2994 // an extra "select". This results in much more compact IR.
2995 // Sa = select Sb, poisoned, (select b, Sc, Sd)
2996 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2997 } else {
2998 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2999 // If Sb (condition is poisoned), look for bits in c and d that are equal
3000 // and both unpoisoned.
3001 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
3002
3003 // Cast arguments to shadow-compatible type.
3004 C = CreateAppToShadowCast(IRB, C);
3005 D = CreateAppToShadowCast(IRB, D);
3006
3007 // Result shadow if condition shadow is 1.
3008 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
3009 }
3010 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
3011 setShadow(&I, Sa);
3012 if (MS.TrackOrigins) {
3013 // Origins are always i32, so any vector conditions must be flattened.
3014 // FIXME: consider tracking vector origins for app vectors?
3015 if (B->getType()->isVectorTy()) {
3016 Type *FlatTy = getShadowTyNoVec(B->getType());
3017 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
3018 ConstantInt::getNullValue(FlatTy));
3019 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
3020 ConstantInt::getNullValue(FlatTy));
3021 }
3022 // a = select b, c, d
3023 // Oa = Sb ? Ob : (b ? Oc : Od)
3024 setOrigin(
3025 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
3026 IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
3027 getOrigin(I.getFalseValue()))));
3028 }
3029 }
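A scalar model of the non-aggregate shadow rule above (standalone sketch; the helper name and values are hypothetical): with a poisoned condition, only the bits on which both arms agree and are clean in both stay clean.

    #include <cassert>
    #include <cstdint>

    uint8_t selectShadow(bool b, bool Sb, uint8_t c, uint8_t Sc,
                         uint8_t d, uint8_t Sd) {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      return Sb ? (uint8_t)((c ^ d) | Sc | Sd) : (b ? Sc : Sd);
    }

    int main() {
      // Poisoned condition, but equal and clean arms: the result is clean.
      assert(selectShadow(true, true, 0x05, 0, 0x05, 0) == 0);
      // Poisoned condition with arms differing in bit 0: bit 0 is poisoned.
      assert(selectShadow(true, true, 0x05, 0, 0x04, 0) == 0x01);
    }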
3030
3031 void visitLandingPadInst(LandingPadInst &I) {
3032 // Do nothing.
3033 // See https://github.com/google/sanitizers/issues/504
3034 setShadow(&I, getCleanShadow(&I));
3035 setOrigin(&I, getCleanOrigin());
3036 }
3037
3038 void visitCatchSwitchInst(CatchSwitchInst &I) {
3039 setShadow(&I, getCleanShadow(&I));
3040 setOrigin(&I, getCleanOrigin());
3041 }
3042
3043 void visitFuncletPadInst(FuncletPadInst &I) {
3044 setShadow(&I, getCleanShadow(&I));
3045 setOrigin(&I, getCleanOrigin());
3046 }
3047
3048 void visitGetElementPtrInst(GetElementPtrInst &I) {
3049 handleShadowOr(I);
3050 }
3051
3052 void visitExtractValueInst(ExtractValueInst &I) {
3053 IRBuilder<> IRB(&I);
3054 Value *Agg = I.getAggregateOperand();
3055 DEBUG(dbgs() << "ExtractValue: " << I << "\n");
3056 Value *AggShadow = getShadow(Agg);
3057 DEBUG(dbgs() << "  AggShadow:  " << *AggShadow << "\n");
3058 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
3059 DEBUG(dbgs() << "  ResShadow:  " << *ResShadow << "\n");
3060 setShadow(&I, ResShadow);
3061 setOriginForNaryOp(I);
3062 }
3063
3064 void visitInsertValueInst(InsertValueInst &I) {
3065 IRBuilder<> IRB(&I);
3066 DEBUG(dbgs() << "InsertValue: " << I << "\n");
3067 Value *AggShadow = getShadow(I.getAggregateOperand());
3068 Value *InsShadow = getShadow(I.getInsertedValueOperand());
3069 DEBUG(dbgs() << "  AggShadow:  " << *AggShadow << "\n");
3070 DEBUG(dbgs() << "  InsShadow:  " << *InsShadow << "\n");
3071 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
3072 DEBUG(dbgs() << "  Res:        " << *Res << "\n");
3073 setShadow(&I, Res);
3074 setOriginForNaryOp(I);
3075 }
3076
3077 void dumpInst(Instruction &I) {
3078 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
3079 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
3080 } else {
3081 errs() << "ZZZ " << I.getOpcodeName() << "\n";
3082 }
3083 errs() << "QQQ " << I << "\n";
3084 }
3085
3086 void visitResumeInst(ResumeInst &I) {
3087 DEBUG(dbgs() << "Resume: " << I << "\n");
3088 // Nothing to do here.
3089 }
3090
3091 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
3092 DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
3093 // Nothing to do here.
3094 }
3095
3096 void visitCatchReturnInst(CatchReturnInst &CRI) {
3097 DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
3098 // Nothing to do here.
3099 }
3100
3101 void visitAsmInstruction(Instruction &I) {
3102 // Conservative inline assembly handling: check for poisoned shadow of
3103 // asm() arguments, then unpoison the result and all the memory locations
3104 // pointed to by those arguments.
3105 CallInst *CI = dyn_cast<CallInst>(&I);
3106
3107 for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) {
3108 Value *Operand = CI->getOperand(i);
3109 if (Operand->getType()->isSized())
3110 insertShadowCheck(Operand, &I);
3111 }
3112 setShadow(&I, getCleanShadow(&I));
3113 setOrigin(&I, getCleanOrigin());
3114 IRBuilder<> IRB(&I);
3115 IRB.SetInsertPoint(I.getNextNode());
3116 for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) {
3117 Value *Operand = CI->getOperand(i);
3118 Type *OpType = Operand->getType();
3119 if (!OpType->isPointerTy())
3120 continue;
3121 Type *ElType = OpType->getPointerElementType();
3122 if (!ElType->isSized())
3123 continue;
3124 Value *ShadowPtr, *OriginPtr;
3125 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3126 Operand, IRB, ElType, /*Alignment*/ 1, /*isStore*/ true);
3127 Value *CShadow = getCleanShadow(ElType);
3128 IRB.CreateStore(
3129 CShadow,
3130 IRB.CreatePointerCast(ShadowPtr, CShadow->getType()->getPointerTo()));
3131 }
3132 }
3133
3134 void visitInstruction(Instruction &I) {
3135 // Everything else: stop propagating and check for poisoned shadow.
3136 if (ClDumpStrictInstructions)
3137 dumpInst(I);
3138 DEBUG(dbgs() << "DEFAULT: " << I << "\n");
3139 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
3140 Value *Operand = I.getOperand(i);
3141 if (Operand->getType()->isSized())
3142 insertShadowCheck(Operand, &I);
3143 }
3144 setShadow(&I, getCleanShadow(&I));
3145 setOrigin(&I, getCleanOrigin());
3146 }
3147};
3148
3149/// \brief AMD64-specific implementation of VarArgHelper.
3150struct VarArgAMD64Helper : public VarArgHelper {
3151 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
3152 // See a comment in visitCallSite for more details.
3153 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
3154 static const unsigned AMD64FpEndOffset = 176;
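These two offsets mirror the SysV AMD64 register save area; a layout sketch (the register names are standard ABI facts, not taken from this file):

    //   [  0,  48): six 8-byte GP argument registers (rdi, rsi, rdx, rcx, r8, r9)
    //   [ 48, 176): eight 16-byte FP/vector argument registers (xmm0..xmm7)
    //   [176, ...): overflow (stack-passed) arguments, 8-byte aligned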
3155
3156 Function &F;
3157 MemorySanitizer &MS;
3158 MemorySanitizerVisitor &MSV;
3159 Value *VAArgTLSCopy = nullptr;
3160 Value *VAArgOverflowSize = nullptr;
3161
3162 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3163
3164 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3165
3166 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
3167 MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
3168
3169 ArgKind classifyArgument(Value* arg) {
3170 // A very rough approximation of X86_64 argument classification rules.
3171 Type *T = arg->getType();
3172 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
3173 return AK_FloatingPoint;
3174 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3175 return AK_GeneralPurpose;
3176 if (T->isPointerTy())
3177 return AK_GeneralPurpose;
3178 return AK_Memory;
3179 }
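Illustrative classifications under this approximation (example types are assumptions, not drawn from the report):

    //   double, <4 x float>, x86_mmx   -> AK_FloatingPoint
    //   i8..i64, any pointer           -> AK_GeneralPurpose
    //   i128, aggregates               -> AK_Memory (integer wider than 64 bits,
    //                                     or none of the categories above)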
3180
3181 // For VarArg functions, store the argument shadow in an ABI-specific format
3182 // that corresponds to va_list layout.
3183 // We do this because Clang lowers va_arg in the frontend, and this pass
3184 // only sees the low level code that deals with va_list internals.
3185 // A much easier alternative (provided that Clang emits va_arg instructions)
3186 // would have been to associate each live instance of va_list with a copy of
3187 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
3188 // order.
3189 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3190 unsigned GpOffset = 0;
3191 unsigned FpOffset = AMD64GpEndOffset;
3192 unsigned OverflowOffset = AMD64FpEndOffset;
3193 const DataLayout &DL = F.getParent()->getDataLayout();
3194 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3195 ArgIt != End; ++ArgIt) {
3196 Value *A = *ArgIt;
3197 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3198 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3199 bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
3200 if (IsByVal) {
3201 // ByVal arguments always go to the overflow area.
3202 // Fixed arguments passed through the overflow area will be stepped
3203 // over by va_start, so don't count them towards the offset.
3204 if (IsFixed)
3205 continue;
3206 assert(A->getType()->isPointerTy());
3207 Type *RealTy = A->getType()->getPointerElementType();
3208 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3209 Value *ShadowBase =
3210 getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
3211 OverflowOffset += alignTo(ArgSize, 8);
3212 Value *ShadowPtr, *OriginPtr;
3213 std::tie(ShadowPtr, OriginPtr) =
3214 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
3215 /*isStore*/ false);
3216
3217 IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
3218 kShadowTLSAlignment, ArgSize);
3219 } else {
3220 ArgKind AK = classifyArgument(A);
3221 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
3222 AK = AK_Memory;
3223 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
3224 AK = AK_Memory;
3225 Value *ShadowBase;
3226 switch (AK) {
3227 case AK_GeneralPurpose:
3228 ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
3229 GpOffset += 8;
3230 break;
3231 case AK_FloatingPoint:
3232 ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
3233 FpOffset += 16;
3234 break;
3235 case AK_Memory:
3236 if (IsFixed)
3237 continue;
3238 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3239 ShadowBase =
3240 getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3241 OverflowOffset += alignTo(ArgSize, 8);
3242 }
3243 // Take fixed arguments into account for GpOffset and FpOffset,
3244 // but don't actually store shadows for them.
3245 if (IsFixed)
3246 continue;
3247 IRB.CreateAlignedStore(MSV.getShadow(A), ShadowBase,
3248 kShadowTLSAlignment);
3249 }
3250 }
3251 Constant *OverflowSize =
3252 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
3253 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3254 }
3255
3256 /// \brief Compute the shadow address for a given va_arg.
3257 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3258 int ArgOffset) {
3259 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3260 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3261 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3262 "_msarg");
3263 }
3264
3265 void unpoisonVAListTagForInst(IntrinsicInst &I) {
3266 IRBuilder<> IRB(&I);
3267 Value *VAListTag = I.getArgOperand(0);
3268 Value *ShadowPtr, *OriginPtr;
3269 unsigned Alignment = 8;
3270 std::tie(ShadowPtr, OriginPtr) =
3271 MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
3272 /*isStore*/ true);
3273
3274 // Unpoison the whole __va_list_tag.
3275 // FIXME: magic ABI constants.
3276 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3277 /* size */ 24, Alignment, false);
3278 // We shouldn't need to zero out the origins, as they're only checked for
3279 // nonzero shadow.
3280 }
3281
3282 void visitVAStartInst(VAStartInst &I) override {
3283 if (F.getCallingConv() == CallingConv::Win64)
3284 return;
3285 VAStartInstrumentationList.push_back(&I);
3286 unpoisonVAListTagForInst(I);
3287 }
3288
3289 void visitVACopyInst(VACopyInst &I) override {
3290 if (F.getCallingConv() == CallingConv::Win64) return;
3291 unpoisonVAListTagForInst(I);
3292 }
3293
3294 void finalizeInstrumentation() override {
3295 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3296 "finalizeInstrumentation called twice");
3297 if (!VAStartInstrumentationList.empty()) {
3298 // If there is a va_start in this function, make a backup copy of
3299 // va_arg_tls somewhere in the function entry block.
3300 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3301 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3302 Value *CopySize =
3303 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
3304 VAArgOverflowSize);
3305 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3306 IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
3307 }
3308
3309 // Instrument va_start.
3310 // Copy va_list shadow from the backup copy of the TLS contents.
3311 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3312 CallInst *OrigInst = VAStartInstrumentationList[i];
3313 IRBuilder<> IRB(OrigInst->getNextNode());
3314 Value *VAListTag = OrigInst->getArgOperand(0);
3315
3316 Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
3317 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3318 ConstantInt::get(MS.IntptrTy, 16)),
3319 PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
3320 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3321 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
3322 unsigned Alignment = 16;
3323 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
3324 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
3325 Alignment, /*isStore*/ true);
3326 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
3327 AMD64FpEndOffset);
3328 Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
3329 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3330 ConstantInt::get(MS.IntptrTy, 8)),
3331 PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
3332 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
3333 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
3334 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
3335 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
3336 Alignment, /*isStore*/ true);
3337 Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
3338 AMD64FpEndOffset);
3339 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
3340 VAArgOverflowSize);
3341 }
3342 }
3343};
3344
3345/// \brief MIPS64-specific implementation of VarArgHelper.
3346struct VarArgMIPS64Helper : public VarArgHelper {
3347 Function &F;
3348 MemorySanitizer &MS;
3349 MemorySanitizerVisitor &MSV;
3350 Value *VAArgTLSCopy = nullptr;
3351 Value *VAArgSize = nullptr;
3352
3353 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3354
3355 VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
3356 MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
3357
3358 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3359 unsigned VAArgOffset = 0;
3360 const DataLayout &DL = F.getParent()->getDataLayout();
3361 for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
3362 CS.getFunctionType()->getNumParams(), End = CS.arg_end();
3363 ArgIt != End; ++ArgIt) {
3364 Triple TargetTriple(F.getParent()->getTargetTriple());
3365 Value *A = *ArgIt;
3366 Value *Base;
3367 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3368 if (TargetTriple.getArch() == Triple::mips64) {
3369 // Adjust the shadow for arguments smaller than 8 bytes to match the
3370 // placement of bits on a big-endian system.
3371 if (ArgSize < 8)
3372 VAArgOffset += (8 - ArgSize);
3373 }
3374 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
3375 VAArgOffset += ArgSize;
3376 VAArgOffset = alignTo(VAArgOffset, 8);
3377 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3378 }
3379
3380 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
3381 // We reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
3382 // class member; here it holds the total size of all VarArgs.
3383 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3384 }
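A worked example of the big-endian adjustment above (sizes hypothetical): a 4-byte argument on mips64 occupies the high bytes of its 8-byte slot, so its shadow is placed there as well:

    //   VAArgOffset += (8 - 4);                     // skip the low 4 bytes
    //   ... 4 bytes of shadow stored at Base ...
    //   VAArgOffset = alignTo(VAArgOffset + 4, 8);  // advance to the next slot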
3385
3386 /// \brief Compute the shadow address for a given va_arg.
3387 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3388 int ArgOffset) {
3389 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3390 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3391 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3392 "_msarg");
3393 }
3394
3395 void visitVAStartInst(VAStartInst &I) override {
3396 IRBuilder<> IRB(&I);
3397 VAStartInstrumentationList.push_back(&I);
3398 Value *VAListTag = I.getArgOperand(0);
3399 Value *ShadowPtr, *OriginPtr;
3400 unsigned Alignment = 8;
3401 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3402 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
3403 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3404 /* size */ 8, Alignment, false);
3405 }
3406
3407 void visitVACopyInst(VACopyInst &I) override {
3408 IRBuilder<> IRB(&I);
3409 VAStartInstrumentationList.push_back(&I);
3410 Value *VAListTag = I.getArgOperand(0);
3411 Value *ShadowPtr, *OriginPtr;
3412 unsigned Alignment = 8;
3413 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3414 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
3415 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3416 /* size */ 8, Alignment, false);
3417 }
3418
3419 void finalizeInstrumentation() override {
3420 assert(!VAArgSize && !VAArgTLSCopy &&
3421 "finalizeInstrumentation called twice");
3422 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3423 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3424 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3425 VAArgSize);
3426
3427 if (!VAStartInstrumentationList.empty()) {
3428 // If there is a va_start in this function, make a backup copy of
3429 // va_arg_tls somewhere in the function entry block.
3430 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3431 IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
3432 }
3433
3434 // Instrument va_start.
3435 // Copy va_list shadow from the backup copy of the TLS contents.
3436 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3437 CallInst *OrigInst = VAStartInstrumentationList[i];
3438 IRBuilder<> IRB(OrigInst->getNextNode());
3439 Value *VAListTag = OrigInst->getArgOperand(0);
3440 Value *RegSaveAreaPtrPtr =
3441 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3442 PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
3443 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3444 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
3445 unsigned Alignment = 8;
3446 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
3447 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
3448 Alignment, /*isStore*/ true);
3449 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
3450 CopySize);
3451 }
3452 }
3453};
3454
3455/// \brief AArch64-specific implementation of VarArgHelper.
3456struct VarArgAArch64Helper : public VarArgHelper {
3457 static const unsigned kAArch64GrArgSize = 64;
3458 static const unsigned kAArch64VrArgSize = 128;
3459
3460 static const unsigned AArch64GrBegOffset = 0;
3461 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
3462 // Make VR space aligned to 16 bytes.
3463 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
3464 static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
3465 + kAArch64VrArgSize;
3466 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
3467
3468 Function &F;
3469 MemorySanitizer &MS;
3470 MemorySanitizerVisitor &MSV;
3471 Value *VAArgTLSCopy = nullptr;
3472 Value *VAArgOverflowSize = nullptr;
3473
3474 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3475
3476 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3477
3478 VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
3479 MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
3480
3481 ArgKind classifyArgument(Value* arg) {
3482 Type *T = arg->getType();
3483 if (T->isFPOrFPVectorTy())
3484 return AK_FloatingPoint;
3485 if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3486 || (T->isPointerTy()))
3487 return AK_GeneralPurpose;
3488 return AK_Memory;
3489 }
3490
3491 // The instrumentation stores the argument shadow in a non-ABI-specific
3492 // format because it does not know which arguments are named (Clang, as in
3493 // the x86_64 case, lowers va_arg in the frontend, so this pass only sees
3494 // the low-level code that deals with va_list internals).
3495 // The first eight GR registers are saved in the first 64 bytes of the
3496 // va_arg TLS array, followed by the first eight FP/SIMD registers, and
3497 // then the remaining arguments.
3498 // Using constant offsets within the va_arg TLS array allows fast copying
3499 // in finalizeInstrumentation().
3500 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3501 unsigned GrOffset = AArch64GrBegOffset;
3502 unsigned VrOffset = AArch64VrBegOffset;
3503 unsigned OverflowOffset = AArch64VAEndOffset;
3504
3505 const DataLayout &DL = F.getParent()->getDataLayout();
3506 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3507 ArgIt != End; ++ArgIt) {
3508 Value *A = *ArgIt;
3509 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3510 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3511 ArgKind AK = classifyArgument(A);
3512 if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
3513 AK = AK_Memory;
3514 if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
3515 AK = AK_Memory;
3516 Value *Base;
3517 switch (AK) {
3518 case AK_GeneralPurpose:
3519 Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
3520 GrOffset += 8;
3521 break;
3522 case AK_FloatingPoint:
3523 Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
3524 VrOffset += 16;
3525 break;
3526 case AK_Memory:
3527 // Don't count fixed arguments in the overflow area - va_start will
3528 // skip right over them.
3529 if (IsFixed)
3530 continue;
3531 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3532 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3533 OverflowOffset += alignTo(ArgSize, 8);
3534 break;
3535 }
3536 // Count Gp/Vr fixed arguments to their respective offsets, but don't
3537 // bother to actually store a shadow.
3538 if (IsFixed)
3539 continue;
3540 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3541 }
3542 Constant *OverflowSize =
3543 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
3544 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3545 }
3546
3547 /// Compute the shadow address for a given va_arg.
3548 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3549 int ArgOffset) {
3550 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3551 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3552 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3553 "_msarg");
3554 }
3555
3556 void visitVAStartInst(VAStartInst &I) override {
3557 IRBuilder<> IRB(&I);
3558 VAStartInstrumentationList.push_back(&I);
3559 Value *VAListTag = I.getArgOperand(0);
3560 Value *ShadowPtr, *OriginPtr;
3561 unsigned Alignment = 8;
3562 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3563 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
3564 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3565 /* size */ 32, Alignment, false);
3566 }
3567
3568 void visitVACopyInst(VACopyInst &I) override {
3569 IRBuilder<> IRB(&I);
3570 VAStartInstrumentationList.push_back(&I);
3571 Value *VAListTag = I.getArgOperand(0);
3572 Value *ShadowPtr, *OriginPtr;
3573 unsigned Alignment = 8;
3574 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3575 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
3576 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3577 /* size */ 32, Alignment, false);
3578 }
3579
3580 // Retrieve a va_list field of 'void*' size.
3581 Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3582 Value *SaveAreaPtrPtr =
3583 IRB.CreateIntToPtr(
3584 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3585 ConstantInt::get(MS.IntptrTy, offset)),
3586 Type::getInt64PtrTy(*MS.C));
3587 return IRB.CreateLoad(SaveAreaPtrPtr);
3588 }
3589
3590 // Retrieve a va_list field of 'int' size.
3591 Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3592 Value *SaveAreaPtr =
3593 IRB.CreateIntToPtr(
3594 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3595 ConstantInt::get(MS.IntptrTy, offset)),
3596 Type::getInt32PtrTy(*MS.C));
3597 Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
3598 return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
3599 }
3600
3601 void finalizeInstrumentation() override {
3602 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3603 "finalizeInstrumentation called twice");
3604 if (!VAStartInstrumentationList.empty()) {
3605 // If there is a va_start in this function, make a backup copy of
3606 // va_arg_tls somewhere in the function entry block.
3607 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3608 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3609 Value *CopySize =
3610 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
3611 VAArgOverflowSize);
3612 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3613 IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
3614 }
3615
3616 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
3617 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
3618
3619 // Instrument va_start, copy va_list shadow from the backup copy of
3620 // the TLS contents.
3621 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3622 CallInst *OrigInst = VAStartInstrumentationList[i];
3623 IRBuilder<> IRB(OrigInst->getNextNode());
3624
3625 Value *VAListTag = OrigInst->getArgOperand(0);
3626
3627 // The variadic ABI for AArch64 creates two areas to save the incoming
3628 // argument registers (one for the 64-bit general registers x0-x7 and
3629 // another for the 128-bit FP/SIMD registers v0-v7).
3630 // We then need to propagate the shadow arguments to both regions,
3631 // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
3632 // The remaining arguments have their shadow saved at 'va::stack'.
3633 // One caveat is that only the non-named arguments need to be
3634 // propagated, whereas the call-site instrumentation saved 'all' the
3635 // arguments. So to copy the shadow values from the va_arg TLS array
3636 // we need to adjust the offset of both the GR and VR fields by the
3637 // __{gr,vr}_offs value (since those offsets account for the incoming
3638 // named arguments).
3639
3640 // Read the stack pointer from the va_list.
3641 Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
3642
3643 // Read both the __gr_top and __gr_off and add them up.
3644 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
3645 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
3646
3647 Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
3648
3649 // Read both the __vr_top and __vr_off and add them up.
3650 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
3651 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
3652
3653 Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
3654
3655 // The instrumentation does not know how many named arguments are in use,
3656 // and at the call site all the arguments were saved. Since __gr_offs is
3657 // defined as '0 - ((8 - named_gr) * 8)', the idea is to propagate only
3658 // the variadic arguments by skipping the shadow bytes of named arguments.
3659 Value *GrRegSaveAreaShadowPtrOff =
3660 IRB.CreateAdd(GrArgSize, GrOffSaveArea);
3661
3662 Value *GrRegSaveAreaShadowPtr =
3663 MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
3664 /*Alignment*/ 8, /*isStore*/ true)
3665 .first;
3666
3667 Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3668 GrRegSaveAreaShadowPtrOff);
3669 Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
3670
3671 IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, 8, GrSrcPtr, 8, GrCopySize);
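      // Worked example (numbers hypothetical): with three named GP arguments,
      //   __gr_offs                 = 0 - ((8 - 3) * 8) = -40
      //   GrRegSaveAreaShadowPtrOff = 64 + (-40)        = 24  (skip 3 named slots)
      //   GrCopySize                = 64 - 24           = 40  (5 variadic slots)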
3672
3673 // Again, but for FP/SIMD values.
3674 Value *VrRegSaveAreaShadowPtrOff =
3675 IRB.CreateAdd(VrArgSize, VrOffSaveArea);
3676
3677 Value *VrRegSaveAreaShadowPtr =
3678 MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
3679 /*Alignment*/ 8, /*isStore*/ true)
3680 .first;
3681
3682 Value *VrSrcPtr = IRB.CreateInBoundsGEP(
3683 IRB.getInt8Ty(),
3684 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3685 IRB.getInt32(AArch64VrBegOffset)),
3686 VrRegSaveAreaShadowPtrOff);
3687 Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
3688
3689 IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, 8, VrSrcPtr, 8, VrCopySize);
3690
3691 // And finally for remaining arguments.
3692 Value *StackSaveAreaShadowPtr =
3693 MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
3694 /*Alignment*/ 16, /*isStore*/ true)
3695 .first;
3696
3697 Value *StackSrcPtr =
3698 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3699 IRB.getInt32(AArch64VAEndOffset));
3700
3701 IRB.CreateMemCpy(StackSaveAreaShadowPtr, 16, StackSrcPtr, 16,
3702 VAArgOverflowSize);
3703 }
3704 }
3705};
3706
3707/// \brief PowerPC64-specific implementation of VarArgHelper.
3708struct VarArgPowerPC64Helper : public VarArgHelper {
3709 Function &F;
3710 MemorySanitizer &MS;
3711 MemorySanitizerVisitor &MSV;
3712 Value *VAArgTLSCopy = nullptr;
3713 Value *VAArgSize = nullptr;
3714
3715 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3716
3717 VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
3718 MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
3719
3720 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3721 // For PowerPC, we need to deal with alignment of stack arguments -
3722 // they are mostly aligned to 8 bytes, but vectors and i128 arrays
3723 // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
3724 // and QPX vectors are aligned to 32 bytes. For that reason, we
3725 // compute current offset from stack pointer (which is always properly
3726 // aligned), and offset for the first vararg, then subtract them.
3727 unsigned VAArgBase;
3728 Triple TargetTriple(F.getParent()->getTargetTriple());
3729 // Parameter save area starts at 48 bytes from frame pointer for ABIv1,
3730 // and 32 bytes for ABIv2. This is usually determined by target
3731 // endianness, but in theory could be overridden by a function attribute.
3732 // For simplicity, we ignore it here (it'd only matter for QPX vectors).
3733 if (TargetTriple.getArch() == Triple::ppc64)
3734 VAArgBase = 48;
3735 else
3736 VAArgBase = 32;
3737 unsigned VAArgOffset = VAArgBase;
3738 const DataLayout &DL = F.getParent()->getDataLayout();
3739 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3740 ArgIt != End; ++ArgIt) {
3741 Value *A = *ArgIt;
3742 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3743 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3744 bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
3745 if (IsByVal) {
3746 assert(A->getType()->isPointerTy());
3747 Type *RealTy = A->getType()->getPointerElementType();
3748 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3749 uint64_t ArgAlign = CS.getParamAlignment(ArgNo);
3750 if (ArgAlign < 8)
3751 ArgAlign = 8;
3752 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3753 if (!IsFixed) {
3754 Value *Base = getShadowPtrForVAArgument(RealTy, IRB,
3755 VAArgOffset - VAArgBase);
3756 Value *AShadowPtr, *AOriginPtr;
3757 std::tie(AShadowPtr, AOriginPtr) = MSV.getShadowOriginPtr(
3758 A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false);
3759
3760 IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
3761 kShadowTLSAlignment, ArgSize);
3762 }
3763 VAArgOffset += alignTo(ArgSize, 8);
3764 } else {
3765 Value *Base;
3766 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3767 uint64_t ArgAlign = 8;
3768 if (A->getType()->isArrayTy()) {
3769 // Arrays are aligned to element size, except for long double
3770 // arrays, which are aligned to 8 bytes.
3771 Type *ElementTy = A->getType()->getArrayElementType();
3772 if (!ElementTy->isPPC_FP128Ty())
3773 ArgAlign = DL.getTypeAllocSize(ElementTy);
3774 } else if (A->getType()->isVectorTy()) {
3775 // Vectors are naturally aligned.
3776 ArgAlign = DL.getTypeAllocSize(A->getType());
3777 }
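// [Editorial example] E.g. a [4 x i32] array yields ArgAlign = 4 (raised to
// the 8-byte minimum just below), while a <4 x i32> vector yields
// ArgAlign = 16, its full allocation size.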
3778 if (ArgAlign < 8)
3779 ArgAlign = 8;
3780 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3781 if (DL.isBigEndian()) {
3782 // Adjust the shadow for an argument smaller than 8 bytes to match
3783 // the placement of its bits on a big-endian system.
3784 if (ArgSize < 8)
3785 VAArgOffset += (8 - ArgSize);
3786 }
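// [Editorial example] A 4-byte value passed in an 8-byte big-endian slot
// occupies the slot's bytes 4..7, so the shadow offset is advanced by
// 8 - ArgSize = 4 to line up with the value's actual bytes.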
3787 if (!IsFixed) {
3788 Base = getShadowPtrForVAArgument(A->getType(), IRB,
3789 VAArgOffset - VAArgBase);
3790 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3791 }
3792 VAArgOffset += ArgSize;
3793 VAArgOffset = alignTo(VAArgOffset, 8);
3794 }
3795 if (IsFixed)
3796 VAArgBase = VAArgOffset;
3797 }
3798
3799 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
3800 VAArgOffset - VAArgBase);
3801 // We reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
3802 // class member; here it holds the total size of all varargs.
3803 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3804 }
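// [Editorial sketch, not part of the original source] A standalone model of
// the offset bookkeeping in visitCallSite above; alignToExample is assumed
// to behave like llvm::alignTo from llvm/Support/MathExtras.h.
static uint64_t alignToExample(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align; // round up to a multiple
}
static uint64_t varargOffsetExample() {
  uint64_t Offset = 48;                // ABIv1 parameter save area base.
  Offset = alignToExample(Offset, 16); // 16-byte vector arg: stays at 48.
  Offset += 16;                        // Step past the vector -> 64.
  Offset = alignToExample(Offset, 8);  // 4-byte int: 8-byte slot -> 64.
  Offset += 4;                         // Step past the int -> 68.
  return alignToExample(Offset, 8);    // Re-align for the next slot -> 72.
}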
3805
3806 /// \brief Compute the shadow address for a given va_arg.
3807 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3808 int ArgOffset) {
3809 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3810 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3811 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3812 "_msarg");
3813 }
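// [Editorial note] In plain C++ terms, the IR built above computes
// (ShadowTy *)((uintptr_t)MS.VAArgTLS + ArgOffset), i.e. a byte
// displacement into the thread-local vararg shadow buffer.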
3814
3815 void visitVAStartInst(VAStartInst &I) override {
3816 IRBuilder<> IRB(&I);
3817 VAStartInstrumentationList.push_back(&I);
3818 Value *VAListTag = I.getArgOperand(0);
3819 Value *ShadowPtr, *OriginPtr;
3820 unsigned Alignment = 8;
3821 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3822 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
3823 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3824 /* size */ 8, Alignment, false);
3825 }
3826
3827 void visitVACopyInst(VACopyInst &I) override {
3828 IRBuilder<> IRB(&I);
3829 Value *VAListTag = I.getArgOperand(0);
3830 Value *ShadowPtr, *OriginPtr;
3831 unsigned Alignment = 8;
3832 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3833 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
3834 // Unpoison the whole __va_list_tag.
3835 // FIXME: magic ABI constants.
3836 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3837 /* size */ 8, Alignment, false);
3838 }
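// [Editorial note, a model for illustration only] On ppc64 the va_list is
// effectively a single pointer into the parameter save area, roughly
//   typedef char *va_list;
// which is why memsetting 8 shadow bytes unpoisons the entire tag in both
// visitVAStartInst and visitVACopyInst above.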
3839
3840 void finalizeInstrumentation() override {
3841 assert(!VAArgSize && !VAArgTLSCopy &&
3842        "finalizeInstrumentation called twice");
3843 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3844 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3845 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3846 VAArgSize);
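// [Editorial note] The add-of-zero above simply materializes VAArgSize as an
// IntptrTy-typed Value for use as CopySize; the constant 0 carries no
// semantic meaning.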
3847
3848 if (!VAStartInstrumentationList.empty()) {
3849 // If there is a va_start in this function, make a backup copy of
3850 // va_arg_tls somewhere in the function entry block.
3851 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3852 IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
3853 }
3854
3855 // Instrument va_start.
3856 // Copy va_list shadow from the backup copy of the TLS contents.
3857 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3858 CallInst *OrigInst = VAStartInstrumentationList[i];
3859 IRBuilder<> IRB(OrigInst->getNextNode());
3860 Value *VAListTag = OrigInst->getArgOperand(0);
3861 Value *RegSaveAreaPtrPtr =
3862 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3863 PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
3864 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3865 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
3866 unsigned Alignment = 8;
3867 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
3868 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
3869 Alignment, /*isStore*/ true);
3870 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
3871 CopySize);
3872 }
3873 }
3874};
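// [Editorial sketch, not part of the original source] In pseudo-C, the IR
// emitted by finalizeInstrumentation behaves roughly like:
//
//   char *Copy = alloca(VAArgSize);              // backup at function entry
//   memcpy(Copy, __msan_va_arg_tls, VAArgSize);  // snapshot callee shadow
//   ...
//   va_start(ap);                                // original call site
//   memcpy(shadow_of(*(char **)ap), Copy, VAArgSize); // fill save-area shadow
//
// shadow_of() is a hypothetical helper standing in for getShadowOriginPtr;
// __msan_va_arg_tls is the runtime TLS buffer backing MS.VAArgTLS.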
3875
3876/// \brief A no-op implementation of VarArgHelper.
3877struct VarArgNoOpHelper : public VarArgHelper {
3878 VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
3879 MemorySanitizerVisitor &MSV) {}
3880
3881 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
3882
3883 void visitVAStartInst(VAStartInst &I) override {}
3884
3885 void visitVACopyInst(VACopyInst &I) override {}
3886
3887 void finalizeInstrumentation() override {}
3888};
3889
3890} // end anonymous namespace
3891
3892static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
3893 MemorySanitizerVisitor &Visitor) {
3894 // VarArg handling is implemented only on AMD64, MIPS64, AArch64 and
3895 // PowerPC64; false positives are possible on other platforms.
3896 Triple TargetTriple(Func.getParent()->getTargetTriple());
3897 if (TargetTriple.getArch() == Triple::x86_64)
3898 return new VarArgAMD64Helper(Func, Msan, Visitor);
3899 else if (TargetTriple.getArch() == Triple::mips64 ||
3900 TargetTriple.getArch() == Triple::mips64el)
3901 return new VarArgMIPS64Helper(Func, Msan, Visitor);
3902 else if (TargetTriple.getArch() == Triple::aarch64)
3903 return new VarArgAArch64Helper(Func, Msan, Visitor);
3904 else if (TargetTriple.getArch() == Triple::ppc64 ||
3905 TargetTriple.getArch() == Triple::ppc64le)
3906 return new VarArgPowerPC64Helper(Func, Msan, Visitor);
3907 else
3908 return new VarArgNoOpHelper(Func, Msan, Visitor);
3909}
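// [Editorial example] For a module whose target triple is
// "powerpc64le-unknown-linux-gnu", TargetTriple.getArch() yields
// Triple::ppc64le, so CreateVarArgHelper returns a VarArgPowerPC64Helper:
//   Triple T("powerpc64le-unknown-linux-gnu");
//   assert(T.getArch() == Triple::ppc64le);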
3910
3911bool MemorySanitizer::runOnFunction(Function &F) {
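// [Editorial note] MsanCtorFunction is the msan.module_ctor generated at
// module initialization; the pass must not instrument its own constructor,
// hence the early return below.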
3912 if (&F == MsanCtorFunction)
3913 return false;
3914 MemorySanitizerVisitor Visitor(F, *this);
3915
3916 // Clear out readonly/readnone attributes: instrumentation adds memory accesses.
3917 AttrBuilder B;
3918 B.addAttribute(Attribute::ReadOnly)
3919 .addAttribute(Attribute::ReadNone);
3920 F.removeAttributes(AttributeList::FunctionIndex, B);
3921
3922 return Visitor.runOnFunction();
3923}