//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kWebAssemblyShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;

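// Worked example: with the shifts and masks above, an 8-byte user-space
// write (AccessSizeIndex 3, IsWrite 1, CompileKernel 0) packs into
// (1 << kIsWriteShift) | (3 << kAccessSizeIndexShift) |
// (0 << kCompileKernelShift) == 0x26.
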
// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::Hidden, cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

236 "asan-always-slow-path",
237 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
238 cl::init(false));
239
241 "asan-force-dynamic-shadow",
242 cl::desc("Load shadow address into a local variable for each function"),
243 cl::Hidden, cl::init(false));
244
static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

276 "asan-use-after-return",
277 cl::desc("Sets the mode of detection for stack-use-after-return."),
280 "Never detect stack use after return."),
283 "Detect stack use after return if "
284 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
286 "Always detect stack use after return.")),
288
static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"),
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

309 "asan-detect-invalid-pointer-pair",
310 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
311 cl::init(false));
312
314 "asan-detect-invalid-pointer-cmp",
315 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
316 cl::init(false));
317
319 "asan-detect-invalid-pointer-sub",
320 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
321 cl::init(false));
322
324 "asan-realign-stack",
325 cl::desc("Realign stack to the value of this flag (power of two)"),
326 cl::Hidden, cl::init(32));
327
329 "asan-instrumentation-with-call-threshold",
330 cl::desc("If the function being instrumented contains more than "
331 "this number of memory accesses, use callbacks instead of "
332 "inline checks (-1 means never use callbacks)."),
333 cl::Hidden, cl::init(7000));
334
336 "asan-memory-access-callback-prefix",
337 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
338 cl::init("__asan_"));
339
341 "asan-kernel-mem-intrinsic-prefix",
342 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
343 cl::init(false));
344
static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

356 "asan-constructor-kind",
357 cl::desc("Sets the ASan constructor kind"),
358 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
360 "Use global constructors")),
362// These flags allow to change the shadow mapping.
363// The shadow mapping looks like
364// Shadow = (Mem >> scale) + offset
365
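// Worked example: with the default 64-bit parameters scale = 3 and
// offset = 0x7fff8000, the shadow byte for an address Mem lives at
// (Mem >> 3) + 0x7fff8000, so every 8 bytes of application memory share
// one shadow byte.
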
static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

386 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
387 cl::Hidden, cl::init(true));
388
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

394 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
395 cl::Hidden, cl::init(false));
396
398 "asan-stack-dynamic-alloca",
399 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
400 cl::init(true));
401
403 "asan-force-experiment",
404 cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
405 cl::init(0));
406
static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

431 "asan-destructor-kind",
432 cl::desc("Sets the ASan destructor kind. The default is to use the value "
433 "provided to the pass constructor"),
434 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
436 "Use global destructors")),
438
// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.isABIN32();
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsAMDGPU = TargetTriple.isAMDGPU();
  bool IsHaiku = TargetTriple.isOSHaiku();
  bool IsWasm = TargetTriple.isWasm();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsWasm)
      Mapping.Offset = kWebAssemblyShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsHaiku && IsX86_64)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing shadow offset is more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 and loongarch64 we have to use add since
  // the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  Mapping.InGlobal = ClWithIfunc && IsAndroid && IsArmOrThumb;

  return Mapping;
}

void llvm::getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                                     bool IsKasan, uint64_t *ShadowBase,
                                     int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

void llvm::removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem) {
  // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
  //
  // This is not only true for sanitized functions, because AttrInfer can
  // infer those attributes on libc functions, which is not true if those
  // are instrumented (Android) or intercepted.
  //
  // We might want to model ASan shadow memory more opaquely to get rid of
  // this problem altogether, by hiding the shadow memory write in an
  // intrinsic, essentially like in the AArch64StackTagging pass. But that's
  // for another day.

  // The API is weird. `onlyReadsMemory` actually means "does not write", and
  // `onlyWritesMemory` actually means "does not read". So we reconstruct
  // "accesses memory" && "does not read" <=> "writes".
  bool Changed = false;
  if (!F.doesNotAccessMemory()) {
    bool WritesMemory = !F.onlyReadsMemory();
    bool ReadsMemory = !F.onlyWritesMemory();
    if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
      F.removeFnAttr(Attribute::Memory);
      Changed = true;
    }
  }
  if (ReadsArgMem) {
    for (Argument &A : F.args()) {
      if (A.hasAttribute(Attribute::WriteOnly)) {
        A.removeAttr(Attribute::WriteOnly);
        Changed = true;
      }
    }
  }
  if (Changed) {
    // nobuiltin makes sure later passes don't restore assumptions about
    // the function.
    F.addFnAttr(Attribute::NoBuiltin);
  }
}

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}

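// For example, the default mapping scale of 3 yields
// std::max(32U, 1U << 3) == 32-byte redzones, while scales 6 and 7 yield
// 64- and 128-byte redzones respectively.
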
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten())
    return kAsanEmscriptenCtorAndDtorPriority;
  else
    return kAsanCtorAndDtorPriority;
}

static Twine genName(StringRef suffix) {
  return Twine(kAsanGenPrefix) + suffix;
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();

      if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle
        OperandBundleDef OB("funclet", &*EHPadIt);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = M.getTargetTriple();

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
      const TargetTransformInfo *TTI);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
                          const TargetTransformInfo *TTI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);
  void markCatchParametersAsUninteresting(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  Module &M;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = M.getTargetTriple();
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule();

private:
  void initializeCallbacks();

  void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor();

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit);
  void createInitializerPoisonCalls();
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion() const;
  GlobalVariable *getOrCreateModuleName();

  Module &M;
  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
  GlobalVariable *ModuleName = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust extracted SP to compute the address of the most recent
    // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
    // this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Value *DynamicAreaOffset = IRB.CreateIntrinsic(
          Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace
  // all its uses with the new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

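  // Worked example (illustrative): with kAllocaRzSize == 32, a 100-byte
  // dynamic alloca grows to 192 bytes: a 32-byte left redzone, the 100
  // requested bytes plus 28 bytes of partial-redzone padding (rounding up
  // to a 32-byte boundary), and a 32-byte right redzone; the returned
  // address is tmp + 32.
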
  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
    // We're interested only in allocas we can handle.
    if (!AI || !ASan.isInterestingAlloca(*AI))
      return;

    std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
    // Check that size is known and can be stored in IntptrTy.
    // TODO: Add support for scalable vectors if possible.
    if (!Size || Size->isScalable() ||
        !ConstantInt::isValueValidForType(IntptrTy, Size->getFixedValue()))
      return;

    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.UseAfterScope)
    OS << "use-after-scope";
  OS << '>';
}

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if nosanitize_address module flag is present for the module.
  // This implies that asan pass has already run before.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    if (F.empty())
      continue;
    if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
      continue;
    if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
      continue;
    if (F.getName().starts_with("__asan_"))
      continue;
    if (F.isPresplitCoroutine())
      continue;
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
  }
  Modified |= ModuleSanitizer.instrumentModule();
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

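// For example, an 8-bit access maps to index 0, a 32-bit access to index 2,
// and a 128-bit access to index 4, consistent with kNumberOfAccessSizes == 5.
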
/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}

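// Illustrative note: for the default x86-64 Linux mapping this emits IR
// roughly like
//   %0 = lshr i64 %addr, 3
//   %1 = add i64 %0, 2147450880 ; 0x7fff8000
// with `or` replacing `add` when OrShadowOffset is set.
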
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);

  if (!Inserted)
    return It->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  It->second = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
    const TargetTransformInfo *TTI) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = CI->getParamAlign(0);
      Value *Mask = CI->getOperand(1 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        MemIntrinsicInfo IntrInfo;
        if (TTI->getTgtMemIntrinsic(II, IntrInfo))
          Interesting = IntrInfo.InterestingOperands;
        return;
      }
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}

1587static bool isPointerOperand(Value *V) {
1588 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1589}
1590
1591// This is a rough heuristic; it may cause both false positives and
1592// false negatives. The proper implementation requires cooperation with
1593// the frontend.
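// For example, this matches "p < q" where both operands are pointers (or
// ptrtoint values); relational comparison of pointers into unrelated
// objects is what the runtime pointer-compare check is meant to flag.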
1594 static bool isInterestingPointerComparison(Instruction *I) {
1595   if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1596 if (!Cmp->isRelational())
1597 return false;
1598 } else {
1599 return false;
1600 }
1601 return isPointerOperand(I->getOperand(0)) &&
1602 isPointerOperand(I->getOperand(1));
1603}
1604
1605// This is a rough heuristic; it may cause both false positives and
1606// false negatives. The proper implementation requires cooperation with
1607// the frontend.
1608 static bool isInterestingPointerSubtraction(Instruction *I) {
1609   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1610     if (BO->getOpcode() != Instruction::Sub)
1611 return false;
1612 } else {
1613 return false;
1614 }
1615 return isPointerOperand(I->getOperand(0)) &&
1616 isPointerOperand(I->getOperand(1));
1617}
1618
1619bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1620 // If a global variable does not have dynamic initialization we don't
1621 // have to instrument it. However, if a global does not have an initializer
1622 // at all, we assume it has a dynamic initializer (in another TU).
1623 if (!G->hasInitializer())
1624 return false;
1625
1626 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1627 return false;
1628
1629 return true;
1630}
1631
1632void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1633 Instruction *I, RuntimeCallInserter &RTCI) {
1634 IRBuilder<> IRB(I);
1635 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1636 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1637 for (Value *&i : Param) {
1638 if (i->getType()->isPointerTy())
1639 i = IRB.CreatePointerCast(i, IntptrTy);
1640 }
1641 RTCI.createRuntimeCall(IRB, F, Param);
1642}
1643
1644static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1645 Instruction *InsertBefore, Value *Addr,
1646 MaybeAlign Alignment, unsigned Granularity,
1647 TypeSize TypeStoreSize, bool IsWrite,
1648 Value *SizeArgument, bool UseCalls,
1649 uint32_t Exp, RuntimeCallInserter &RTCI) {
1650 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1651 // if the data is properly aligned.
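// E.g. an 8-byte access with alignment >= 8 needs a single shadow-byte
// check; a 3-byte access, or one whose known alignment is smaller than
// both the shadow granularity and the access size, falls through to
// instrumentUnusualSizeOrAlignment below.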
1652 if (!TypeStoreSize.isScalable()) {
1653 const auto FixedSize = TypeStoreSize.getFixedValue();
1654 switch (FixedSize) {
1655 case 8:
1656 case 16:
1657 case 32:
1658 case 64:
1659 case 128:
1660 if (!Alignment || *Alignment >= Granularity ||
1661 *Alignment >= FixedSize / 8)
1662 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1663 FixedSize, IsWrite, nullptr, UseCalls,
1664 Exp, RTCI);
1665 }
1666 }
1667 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1668 IsWrite, nullptr, UseCalls, Exp, RTCI);
1669}
1670
1671void AddressSanitizer::instrumentMaskedLoadOrStore(
1672 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1673 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1674 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1675 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1676 RuntimeCallInserter &RTCI) {
1677 auto *VTy = cast<VectorType>(OpType);
1678 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1679 auto Zero = ConstantInt::get(IntptrTy, 0);
1680
1681 IRBuilder IB(I);
1682 Instruction *LoopInsertBefore = I;
1683 if (EVL) {
1684 // The End argument of SplitBlockAndInsertForEachLane is assumed to be
1685 // greater than zero, so we must check whether EVL is zero here.
1686 Type *EVLType = EVL->getType();
1687 Value *EVLNonZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1688 LoopInsertBefore = SplitBlockAndInsertIfThen(EVLNonZero, I, false);
1689 IB.SetInsertPoint(LoopInsertBefore);
1690 // Cast EVL to IntptrTy.
1691 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1692 // To avoid undefined behavior when extracting with an out-of-range index,
1693 // use the minimum of EVL and the element count as the trip count.
1694 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1695 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1696 } else {
1697 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1698 }
1699
1700 // Cast Stride to IntptrTy.
1701 if (Stride)
1702 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1703
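// Conceptually, the per-lane expansion below behaves like the following
// sketch (not the exact IR emitted), e.g. for a masked store of
// <4 x float>:
//   for (i = 0; i < EVL; i++)
//     if (Mask[i]) check_shadow(&Addr[i], 4);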
1704 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
1705 [&](IRBuilderBase &IRB, Value *Index) {
1706 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1707 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1708 if (MaskElemC->isZero())
1709 // No check
1710 return;
1711 // Unconditional check
1712 } else {
1713 // Conditional check
1714 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1715 MaskElem, &*IRB.GetInsertPoint(), false);
1716 IRB.SetInsertPoint(ThenTerm);
1717 }
1718
1719 Value *InstrumentedAddress;
1720 if (isa<VectorType>(Addr->getType())) {
1721 assert(
1722 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1723 "Expected vector of pointer.");
1724 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1725 } else if (Stride) {
1726 Index = IRB.CreateMul(Index, Stride);
1727 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1728 } else {
1729 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1730 }
1731 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1732 Alignment, Granularity, ElemTypeSize, IsWrite,
1733 SizeArgument, UseCalls, Exp, RTCI);
1734 });
1735}
1736
1737void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1738 InterestingMemoryOperand &O, bool UseCalls,
1739 const DataLayout &DL,
1740 RuntimeCallInserter &RTCI) {
1741 Value *Addr = O.getPtr();
1742
1743 // Optimization experiments.
1744 // The experiments can be used to evaluate potential optimizations that remove
1745 // instrumentation (assess false negatives). Instead of completely removing
1746 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1747 // experiments that want to remove instrumentation of this instruction).
1748 // If Exp is non-zero, this pass will emit special calls into runtime
1749 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1750 // make the runtime terminate the program in a special way (with a different
1751 // exit status). Then you run the new compiler on a buggy corpus, collect
1752 // the special terminations (ideally, you don't see them at all -- no false
1753 // negatives) and make the decision on the optimization.
1754 uint32_t Exp = ClForceExperiment;
1755
1756 if (ClOpt && ClOptGlobals) {
1757 // If initialization order checking is disabled, a simple access to a
1758 // dynamically initialized global is always valid.
1759 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1760 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1761 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1762 NumOptimizedAccessesToGlobalVar++;
1763 return;
1764 }
1765 }
1766
1767 if (ClOpt && ClOptStack) {
1768 // A direct inbounds access to a stack variable is always valid.
1769 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1770     isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1771 NumOptimizedAccessesToStackVar++;
1772 return;
1773 }
1774 }
1775
1776 if (O.IsWrite)
1777 NumInstrumentedWrites++;
1778 else
1779 NumInstrumentedReads++;
1780
1781 if (O.MaybeByteOffset) {
1782 Type *Ty = Type::getInt8Ty(*C);
1783 IRBuilder IB(O.getInsn());
1784
1785 Value *OffsetOp = O.MaybeByteOffset;
1786 if (TargetTriple.isRISCV()) {
1787 Type *OffsetTy = OffsetOp->getType();
1788 // RVV indexed loads/stores zero-extend offset operands which are narrower
1789 // than XLEN to XLEN.
1790 if (OffsetTy->getScalarType()->getIntegerBitWidth() <
1791 static_cast<unsigned>(LongSize)) {
1792 VectorType *OrigType = cast<VectorType>(OffsetTy);
1793 Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
1794 OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
1795 }
1796 }
1797 Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
1798 }
1799
1800 unsigned Granularity = 1 << Mapping.Scale;
1801 if (O.MaybeMask) {
1802 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1803 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1804 Granularity, O.OpType, O.IsWrite, nullptr,
1805 UseCalls, Exp, RTCI);
1806 } else {
1807 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1808 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1809 UseCalls, Exp, RTCI);
1810 }
1811}
1812
1813Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1814 Value *Addr, bool IsWrite,
1815 size_t AccessSizeIndex,
1816 Value *SizeArgument,
1817 uint32_t Exp,
1818 RuntimeCallInserter &RTCI) {
1819 InstrumentationIRBuilder IRB(InsertBefore);
1820 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1821 CallInst *Call = nullptr;
1822 if (SizeArgument) {
1823 if (Exp == 0)
1824 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1825 {Addr, SizeArgument});
1826 else
1827 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1828 {Addr, SizeArgument, ExpVal});
1829 } else {
1830 if (Exp == 0)
1831 Call = RTCI.createRuntimeCall(
1832 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1833 else
1834 Call = RTCI.createRuntimeCall(
1835 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1836 }
1837
1838 Call->setCannotMerge();
1839 return Call;
1840}
1841
1842Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1843 Value *ShadowValue,
1844 uint32_t TypeStoreSize) {
1845 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1846 // Addr & (Granularity - 1)
1847 Value *LastAccessedByte =
1848 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1849 // (Addr & (Granularity - 1)) + size - 1
1850 if (TypeStoreSize / 8 > 1)
1851 LastAccessedByte = IRB.CreateAdd(
1852 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1853 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1854 LastAccessedByte =
1855 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1856 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
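// Worked example with 8-byte granularity: a shadow value of 4 means only
// the first 4 bytes of the granule are addressable. A 2-byte access at
// Addr % 8 == 2 ends at byte 3 < 4 and passes; a 4-byte access at the same
// offset ends at byte 5 >= 4 and is reported.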
1857 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1858}
1859
1860Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1861 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1862 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1863 // Do not instrument unsupported addrspaces.
1864 if (isUnsupportedAMDGPUAddrspace(Addr))
1865   return nullptr;
1866 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1867 // Follow host instrumentation for global and constant addresses.
1868 if (PtrTy->getPointerAddressSpace() != 0)
1869 return InsertBefore;
1870 // Instrument generic addresses in supported address spaces.
1871 IRBuilder<> IRB(InsertBefore);
1872 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1873 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1874 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1875 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1876 Value *AddrSpaceZeroLanding =
1877 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1878 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1879 return InsertBefore;
1880}
1881
1882Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1883 Value *Cond, bool Recover) {
1884 Module &M = *IRB.GetInsertBlock()->getModule();
1885 Value *ReportCond = Cond;
1886 if (!Recover) {
1887 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1888 IRB.getInt1Ty());
1889 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1890 }
1891
1892 auto *Trm =
1893 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1894                               MDBuilder(*C).createUnlikelyBranchWeights());
1895 Trm->getParent()->setName("asan.report");
1896
1897 if (Recover)
1898 return Trm;
1899
1900 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1901 IRB.SetInsertPoint(Trm);
1902 return IRB.CreateCall(
1903 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1904}
1905
1906void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1907 Instruction *InsertBefore, Value *Addr,
1908 MaybeAlign Alignment,
1909 uint32_t TypeStoreSize, bool IsWrite,
1910 Value *SizeArgument, bool UseCalls,
1911 uint32_t Exp,
1912 RuntimeCallInserter &RTCI) {
1913 if (TargetTriple.isAMDGPU()) {
1914 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1915 TypeStoreSize, IsWrite, SizeArgument);
1916 if (!InsertBefore)
1917 return;
1918 }
1919
1920 InstrumentationIRBuilder IRB(InsertBefore);
1921 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1922
1923 if (UseCalls && ClOptimizeCallbacks) {
1924 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1925 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1926 {IRB.CreatePointerCast(Addr, PtrTy),
1927 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1928 return;
1929 }
1930
1931 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1932 if (UseCalls) {
1933 if (Exp == 0)
1934 RTCI.createRuntimeCall(
1935 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1936 else
1937 RTCI.createRuntimeCall(
1938 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1939 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1940 return;
1941 }
1942
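// memToShadow applies the mapping Shadow = (Addr >> Scale) + Offset
// (modulo target-specific variants). With the default Scale of 3, each
// shadow byte describes an 8-byte granule, so e.g. a 128-bit access loads
// a 16-bit shadow word covering two granules.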
1943 Type *ShadowTy =
1944 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1945 Type *ShadowPtrTy = PointerType::get(*C, 0);
1946 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1947 const uint64_t ShadowAlign =
1948 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1949 Value *ShadowValue = IRB.CreateAlignedLoad(
1950 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1951
1952 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1953 size_t Granularity = 1ULL << Mapping.Scale;
1954 Instruction *CrashTerm = nullptr;
1955
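// With the default 8-byte shadow granularity, accesses narrower than a
// granule (1, 2, or 4 bytes) take the slow path below, since their granule
// may be only partially addressable; a full 8-byte access is decided
// entirely by whether its shadow byte is zero.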
1956 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1957
1958 if (TargetTriple.isAMDGCN()) {
1959 if (GenSlowPath) {
1960 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1961 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1962 }
1963 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1964 } else if (GenSlowPath) {
1965 // We use branch weights for the slow path check, to indicate that the slow
1966 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1967 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1968     Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1969 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1970 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1971 IRB.SetInsertPoint(CheckTerm);
1972 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1973 if (Recover) {
1974 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1975 } else {
1976 BasicBlock *CrashBlock =
1977 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1978 CrashTerm = new UnreachableInst(*C, CrashBlock);
1979 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1980 ReplaceInstWithInst(CheckTerm, NewTerm);
1981 }
1982 } else {
1983 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1984 }
1985
1986 Instruction *Crash = generateCrashCode(
1987 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1988 if (OrigIns->getDebugLoc())
1989 Crash->setDebugLoc(OrigIns->getDebugLoc());
1990}
1991
1992// Instrument unusual size or unusual alignment.
1993 // We cannot do it with a single check, so we do a 1-byte check for the first
1994 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1995// to report the actual access size.
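// E.g. a 10-byte access checks the shadow of Addr and of Addr + 9, and on
// failure reports via __asan_report_load_n/store_n with real_size = 10.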
1996void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1997 Instruction *I, Instruction *InsertBefore, Value *Addr,
1998 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
1999 uint32_t Exp, RuntimeCallInserter &RTCI) {
2000 InstrumentationIRBuilder IRB(InsertBefore);
2001 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
2002 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
2003
2004 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
2005 if (UseCalls) {
2006 if (Exp == 0)
2007 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2008 {AddrLong, Size});
2009 else
2010 RTCI.createRuntimeCall(
2011 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2012 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
2013 } else {
2014 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
2015 Value *LastByte = IRB.CreateIntToPtr(
2016 IRB.CreateAdd(AddrLong, SizeMinusOne),
2017 Addr->getType());
2018 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
2019 RTCI);
2020 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
2021 Exp, RTCI);
2022 }
2023}
2024
2025void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
2026 // Set up the arguments to our poison/unpoison functions.
2027 IRBuilder<> IRB(&GlobalInit.front(),
2028 GlobalInit.front().getFirstInsertionPt());
2029
2030 // Add a call to poison all external globals before the given function starts.
2031 Value *ModuleNameAddr =
2032 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
2033 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2034
2035 // Add calls to unpoison all globals before each return instruction.
2036 for (auto &BB : GlobalInit)
2037   if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
2038     CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
2039}
2040
2041void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2042 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2043 if (!GV)
2044 return;
2045
2046 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
2047 if (!CA)
2048 return;
2049
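// Each llvm.global_ctors entry is a { i32 priority, ptr ctor, ptr data }
// struct, e.g. { i32 65535, ptr @_GLOBAL__sub_I_foo.cpp, ptr null }.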
2050 for (Use &OP : CA->operands()) {
2051 if (isa<ConstantAggregateZero>(OP)) continue;
2052 ConstantStruct *CS = cast<ConstantStruct>(OP);
2053
2054 // Must have a function or null ptr.
2055 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2056 if (F->getName() == kAsanModuleCtorName) continue;
2057 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2058 // Don't instrument CTORs that will run before asan.module_ctor.
2059 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2060 continue;
2061 poisonOneInitializer(*F);
2062 }
2063 }
2064}
2065
2066const GlobalVariable *
2067ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2068 // In case this function should be expanded to include rules that do not just
2069 // apply when CompileKernel is true, either guard all existing rules with an
2070 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2071 // should also apply to user space.
2072 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2073
2074 const Constant *C = GA.getAliasee();
2075
2076 // When compiling the kernel, globals that are aliased by symbols prefixed
2077 // by "__" are special and cannot be padded with a redzone.
2078 if (GA.getName().starts_with("__"))
2079 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2080
2081 return nullptr;
2082}
2083
2084bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2085 Type *Ty = G->getValueType();
2086 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2087
2088 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2089 return false;
2090 if (!Ty->isSized()) return false;
2091 if (!G->hasInitializer()) return false;
2092 // Globals in address spaces 1 and 4 are supported for AMDGPU.
2093 if (G->getAddressSpace() &&
2094 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2095 return false;
2096 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2097 // Two problems with thread-locals:
2098 // - The address of the main thread's copy can't be computed at link-time.
2099 // - Need to poison all copies, not just the main thread's one.
2100 if (G->isThreadLocal()) return false;
2101 // For now, just ignore this Global if the alignment is large.
2102 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2103
2104 // For non-COFF targets, only instrument globals known to be defined by this
2105 // TU.
2106 // FIXME: We can instrument comdat globals on ELF if we are using the
2107 // GC-friendly metadata scheme.
2108 if (!TargetTriple.isOSBinFormatCOFF()) {
2109 if (!G->hasExactDefinition() || G->hasComdat())
2110 return false;
2111 } else {
2112 // On COFF, don't instrument non-ODR linkages.
2113 if (G->isInterposable())
2114 return false;
2115 // If the global has AvailableExternally linkage, then it is not in this
2116 // module, which means it does not need to be instrumented.
2117 if (G->hasAvailableExternallyLinkage())
2118 return false;
2119 }
2120
2121 // If a comdat is present, it must have a selection kind that implies ODR
2122 // semantics: no duplicates, any, or exact match.
2123 if (Comdat *C = G->getComdat()) {
2124 switch (C->getSelectionKind()) {
2125 case Comdat::Any:
2126 case Comdat::ExactMatch:
2127 case Comdat::NoDeduplicate:
2128   break;
2129 case Comdat::Largest:
2130 case Comdat::SameSize:
2131 return false;
2132 }
2133 }
2134
2135 if (G->hasSection()) {
2136 // The kernel uses explicit sections for mostly special global variables
2137 // that we should not instrument. E.g. the kernel may rely on their layout
2138 // without redzones, or remove them at link time ("discard.*"), etc.
2139 if (CompileKernel)
2140 return false;
2141
2142 StringRef Section = G->getSection();
2143
2144 // Globals from llvm.metadata aren't emitted, do not instrument them.
2145 if (Section == "llvm.metadata") return false;
2146 // Do not instrument globals from special LLVM sections.
2147 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2148 return false;
2149
2150 // Do not instrument function pointers to initialization and termination
2151 // routines: dynamic linker will not properly handle redzones.
2152 if (Section.starts_with(".preinit_array") ||
2153 Section.starts_with(".init_array") ||
2154 Section.starts_with(".fini_array")) {
2155 return false;
2156 }
2157
2158 // Do not instrument user-defined sections (with names resembling
2159 // valid C identifiers)
2160 if (TargetTriple.isOSBinFormatELF()) {
2161 if (llvm::all_of(Section,
2162 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2163 return false;
2164 }
2165
2166 // On COFF, if the section name contains '$', it is highly likely that the
2167 // user is using section sorting to create an array of globals similar to
2168 // the way initialization callbacks are registered in .init_array and
2169 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2170 // to such globals is counterproductive, because the intent is that they
2171 // will form an array, and out-of-bounds accesses are expected.
2172 // See https://github.com/google/sanitizers/issues/305
2173 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2174 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2175 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2176 << *G << "\n");
2177 return false;
2178 }
2179
2180 if (TargetTriple.isOSBinFormatMachO()) {
2181 StringRef ParsedSegment, ParsedSection;
2182 unsigned TAA = 0, StubSize = 0;
2183 bool TAAParsed;
2184 cantFail(MCSectionMachO::ParseSectionSpecifier(
2185     Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2186
2187 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2188 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2189 // them.
2190 if (ParsedSegment == "__OBJC" ||
2191 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2192 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2193 return false;
2194 }
2195 // See https://github.com/google/sanitizers/issues/32
2196 // Constant CFString instances are compiled in the following way:
2197 // -- the string buffer is emitted into
2198 // __TEXT,__cstring,cstring_literals
2199 // -- the constant NSConstantString structure referencing that buffer
2200 // is placed into __DATA,__cfstring
2201 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2202 // Moreover, it causes the linker to crash on OS X 10.7
2203 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2204 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2205 return false;
2206 }
2207 // The linker merges the contents of cstring_literals and removes the
2208 // trailing zeroes.
2209 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2210 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2211 return false;
2212 }
2213 }
2214 }
2215
2216 if (CompileKernel) {
2217 // Globals that are prefixed by "__" are special and cannot be padded
2218 // with a redzone.
2219 if (G->getName().starts_with("__"))
2220 return false;
2221 }
2222
2223 return true;
2224}
2225
2226// On Mach-O platforms, we emit global metadata in a separate section of the
2227// binary in order to allow the linker to properly dead strip. This is only
2228// supported on recent versions of ld64.
2229bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2230 if (!TargetTriple.isOSBinFormatMachO())
2231 return false;
2232
2233 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2234 return true;
2235 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2236 return true;
2237 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2238 return true;
2239 if (TargetTriple.isDriverKit())
2240 return true;
2241 if (TargetTriple.isXROS())
2242 return true;
2243
2244 return false;
2245}
2246
2247StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2248 switch (TargetTriple.getObjectFormat()) {
2249 case Triple::COFF: return ".ASAN$GL";
2250 case Triple::ELF: return "asan_globals";
2251 case Triple::MachO: return "__DATA,__asan_globals,regular";
2252 case Triple::Wasm:
2253 case Triple::GOFF:
2254 case Triple::SPIRV:
2255 case Triple::XCOFF:
2256 case Triple::DXContainer:
2257   report_fatal_error(
2258       "ModuleAddressSanitizer not implemented for object file format");
2259 case Triple::UnknownObjectFormat:
2260   break;
2261 }
2261 }
2262 llvm_unreachable("unsupported object format");
2263}
2264
2265void ModuleAddressSanitizer::initializeCallbacks() {
2266 IRBuilder<> IRB(*C);
2267
2268 // Declare our poisoning and unpoisoning functions.
2269 AsanPoisonGlobals =
2270 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2271 AsanUnpoisonGlobals =
2272 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2273
2274 // Declare functions that register/unregister globals.
2275 AsanRegisterGlobals = M.getOrInsertFunction(
2276 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2277 AsanUnregisterGlobals = M.getOrInsertFunction(
2278 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2279
2280 // Declare the functions that find globals in a shared object and then invoke
2281 // the (un)register function on them.
2282 AsanRegisterImageGlobals = M.getOrInsertFunction(
2283 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2284 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2285     kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2286
2287 AsanRegisterElfGlobals =
2288 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2289 IntptrTy, IntptrTy, IntptrTy);
2290 AsanUnregisterElfGlobals =
2291 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2292 IntptrTy, IntptrTy, IntptrTy);
2293}
2294
2295// Put the metadata and the instrumented global in the same group. This ensures
2296// that the metadata is discarded if the instrumented global is discarded.
2297void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2298 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2299 Module &M = *G->getParent();
2300 Comdat *C = G->getComdat();
2301 if (!C) {
2302 if (!G->hasName()) {
2303 // If G is unnamed, it must be internal. Give it an artificial name
2304 // so we can put it in a comdat.
2305 assert(G->hasLocalLinkage());
2306 G->setName(genName("anon_global"));
2307 }
2308
2309 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2310 std::string Name = std::string(G->getName());
2311 Name += InternalSuffix;
2312 C = M.getOrInsertComdat(Name);
2313 } else {
2314 C = M.getOrInsertComdat(G->getName());
2315 }
2316
2317 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2318 // linkage to internal linkage so that a symbol table entry is emitted. This
2319 // is necessary in order to create the comdat group.
2320 if (TargetTriple.isOSBinFormatCOFF()) {
2321 C->setSelectionKind(Comdat::NoDeduplicate);
2322 if (G->hasPrivateLinkage())
2323 G->setLinkage(GlobalValue::InternalLinkage);
2324 }
2325 G->setComdat(C);
2326 }
2327
2328 assert(G->hasComdat());
2329 Metadata->setComdat(G->getComdat());
2330}
2331
2332// Create a separate metadata global and put it in the appropriate ASan
2333// global registration section.
2334 GlobalVariable *
2335 ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2336                                              StringRef OriginalName) {
2337   auto Linkage = TargetTriple.isOSBinFormatMachO()
2338                      ? GlobalVariable::InternalLinkage
2339                      : GlobalVariable::PrivateLinkage;
2340   GlobalVariable *Metadata = new GlobalVariable(
2341       M, Initializer->getType(), false, Linkage, Initializer,
2342       Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2343   Metadata->setSection(getGlobalMetadataSection());
2344   // Place metadata in a large section for x86-64 ELF binaries to mitigate
2345   // relocation pressure.
2346   setGlobalVariableLargeSection(TargetTriple, *Metadata);
2347   return Metadata;
2348}
2349
2350Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2351 AsanDtorFunction = Function::createWithDefaultAttr(
2352     FunctionType::get(Type::getVoidTy(*C), false),
2353     GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2354 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2355 // Ensure Dtor cannot be discarded, even if in a comdat.
2356 appendToUsed(M, {AsanDtorFunction});
2357 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2358
2359 return ReturnInst::Create(*C, AsanDtorBB);
2360}
2361
2362void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2363 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2364 ArrayRef<Constant *> MetadataInitializers) {
2365 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2366 auto &DL = M.getDataLayout();
2367
2368 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2369 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2370 Constant *Initializer = MetadataInitializers[i];
2371 GlobalVariable *G = ExtendedGlobals[i];
2372 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2373 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2374 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2375 MetadataGlobals[i] = Metadata;
2376
2377 // The MSVC linker always inserts padding when linking incrementally. We
2378 // cope with that by aligning each struct to its size, which must be a power
2379 // of two.
2380 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2381 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2382 "global metadata will not be padded appropriately");
2383 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2384
2385 SetComdatForGlobalMetadata(G, Metadata, "");
2386 }
2387
2388 // Update llvm.compiler.used, adding the new metadata globals. This is
2389 // needed so that during LTO these variables stay alive.
2390 if (!MetadataGlobals.empty())
2391 appendToCompilerUsed(M, MetadataGlobals);
2392}
2393
2394void ModuleAddressSanitizer::instrumentGlobalsELF(
2395 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2396 ArrayRef<Constant *> MetadataInitializers,
2397 const std::string &UniqueModuleId) {
2398 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2399
2400 // Putting globals in a comdat changes the semantics and can potentially
2401 // cause false-negative ODR violations at link time. If ODR indicators are
2402 // used, we keep the comdat sections, as link-time ODR violations will be
2403 // detected on the ODR indicator symbols.
2404 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2405
2406 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2407 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2408 GlobalVariable *G = ExtendedGlobals[i];
2409 GlobalVariable *Metadata =
2410     CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2411 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2412 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2413 MetadataGlobals[i] = Metadata;
2414
2415 if (UseComdatForGlobalsGC)
2416 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2417 }
2418
2419 // Update llvm.compiler.used, adding the new metadata globals. This is
2420 // needed so that during LTO these variables stay alive.
2421 if (!MetadataGlobals.empty())
2422 appendToCompilerUsed(M, MetadataGlobals);
2423
2424 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2425 // to look up the loaded image that contains it. Second, we can store in it
2426 // whether registration has already occurred, to prevent duplicate
2427 // registration.
2428 //
2429 // Common linkage ensures that there is only one global per shared library.
2430 GlobalVariable *RegisteredFlag = new GlobalVariable(
2431 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2432 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2433 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2434
2435 // Create start and stop symbols.
2436 GlobalVariable *StartELFMetadata = new GlobalVariable(
2437 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2438 "__start_" + getGlobalMetadataSection());
2439 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2440 GlobalVariable *StopELFMetadata = new GlobalVariable(
2441 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2442 "__stop_" + getGlobalMetadataSection());
2443 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2444
2445 // Create a call to register the globals with the runtime.
2446 if (ConstructorKind == AsanCtorKind::Global)
2447 IRB.CreateCall(AsanRegisterElfGlobals,
2448 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2449 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2450 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2451
2452 // We also need to unregister globals at the end, e.g., when a shared library
2453 // gets closed.
2454 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2455 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2456 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2457 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2458 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2459 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2460 }
2461}
2462
2463void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2464 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2465 ArrayRef<Constant *> MetadataInitializers) {
2466 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2467
2468 // On recent Mach-O platforms, use a structure which binds the liveness of
2469 // the global variable to the metadata struct. Keep the list of "Liveness"
2470 // GVs created so they can be added to llvm.compiler.used.
2471 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2472 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2473
2474 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2475 Constant *Initializer = MetadataInitializers[i];
2476 GlobalVariable *G = ExtendedGlobals[i];
2477 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2478
2479 // On recent Mach-O platforms, we emit the global metadata in a way that
2480 // allows the linker to properly strip dead globals.
2481 auto LivenessBinder =
2482 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2483                     ConstantExpr::getPointerCast(Metadata, IntptrTy));
2484 GlobalVariable *Liveness = new GlobalVariable(
2485 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2486 Twine("__asan_binder_") + G->getName());
2487 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2488 LivenessGlobals[i] = Liveness;
2489 }
2490
2491 // Update llvm.compiler.used, adding the new liveness globals. This is
2492 // needed so that during LTO these variables stay alive. The alternative
2493 // would be to have the linker handle the LTO symbols, but libLTO's
2494 // current API does not expose access to the section for each symbol.
2495 if (!LivenessGlobals.empty())
2496 appendToCompilerUsed(M, LivenessGlobals);
2497
2498 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2499 // to look up the loaded image that contains it. Second, we can store in it
2500 // whether registration has already occurred, to prevent duplicate
2501 // registration.
2502 //
2503 // Common linkage ensures that there is only one global per shared library.
2504 GlobalVariable *RegisteredFlag = new GlobalVariable(
2505 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2506 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2507 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2508
2509 if (ConstructorKind == AsanCtorKind::Global)
2510 IRB.CreateCall(AsanRegisterImageGlobals,
2511 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2512
2513 // We also need to unregister globals at the end, e.g., when a shared library
2514 // gets closed.
2515 if (DestructorKind != AsanDtorKind::None) {
2516 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2517 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2518 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2519 }
2520}
2521
2522void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2523 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2524 ArrayRef<Constant *> MetadataInitializers) {
2525 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2526 unsigned N = ExtendedGlobals.size();
2527 assert(N > 0);
2528
2529 // On platforms that don't have a custom metadata section, we emit an array
2530 // of global metadata structures.
2531 ArrayType *ArrayOfGlobalStructTy =
2532 ArrayType::get(MetadataInitializers[0]->getType(), N);
2533 auto AllGlobals = new GlobalVariable(
2534 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2535 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2536 if (Mapping.Scale > 3)
2537 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2538
2539 if (ConstructorKind == AsanCtorKind::Global)
2540 IRB.CreateCall(AsanRegisterGlobals,
2541 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2542 ConstantInt::get(IntptrTy, N)});
2543
2544 // We also need to unregister globals at the end, e.g., when a shared library
2545 // gets closed.
2546 if (DestructorKind != AsanDtorKind::None) {
2547 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2548 IrbDtor.CreateCall(AsanUnregisterGlobals,
2549 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2550 ConstantInt::get(IntptrTy, N)});
2551 }
2552}
2553
2554// This function replaces all global variables with new variables that have
2555// trailing redzones. It also creates a function that poisons
2556// redzones and inserts this function into llvm.global_ctors.
2557// Sets *CtorComdat to true if the global registration code emitted into the
2558// asan constructor is comdat-compatible.
2559void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
2560 bool *CtorComdat) {
2561 // Build set of globals that are aliased by some GA, where
2562 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2563 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2564 if (CompileKernel) {
2565 for (auto &GA : M.aliases()) {
2566 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2567 AliasedGlobalExclusions.insert(GV);
2568 }
2569 }
2570
2571 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2572 for (auto &G : M.globals()) {
2573 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2574 GlobalsToChange.push_back(&G);
2575 }
2576
2577 size_t n = GlobalsToChange.size();
2578 auto &DL = M.getDataLayout();
2579
2580 // A global is described by a structure
2581 // size_t beg;
2582 // size_t size;
2583 // size_t size_with_redzone;
2584 // const char *name;
2585 // const char *module_name;
2586 // size_t has_dynamic_init;
2587 // size_t padding_for_windows_msvc_incremental_link;
2588 // size_t odr_indicator;
2589 // We initialize an array of such structures and pass it to a run-time call.
2590 StructType *GlobalStructTy =
2591 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2592 IntptrTy, IntptrTy, IntptrTy);
2593 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2594 SmallVector<Constant *, 16> Initializers(n);
2595
2596 for (size_t i = 0; i < n; i++) {
2597 GlobalVariable *G = GlobalsToChange[i];
2598
2599 GlobalValue::SanitizerMetadata MD;
2600 if (G->hasSanitizerMetadata())
2601 MD = G->getSanitizerMetadata();
2602
2603 // The runtime library tries demangling symbol names in the descriptor but
2604 // functionality like __cxa_demangle may be unavailable (e.g.
2605 // -static-libstdc++). So we demangle the symbol names here.
2606 std::string NameForGlobal = G->getName().str();
2607 GlobalVariable *Name =
2608     createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2609                                  /*AllowMerging*/ true, genName("global"));
2610
2611 Type *Ty = G->getValueType();
2612 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2613 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2614 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2615
2616 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2617 Constant *NewInitializer = ConstantStruct::get(
2618 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2619
2620 // Create a new global variable with enough space for a redzone.
2621 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2622 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2623   Linkage = GlobalValue::InternalLinkage;
2624 GlobalVariable *NewGlobal = new GlobalVariable(
2625 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2626 G->getThreadLocalMode(), G->getAddressSpace());
2627 NewGlobal->copyAttributesFrom(G);
2628 NewGlobal->setComdat(G->getComdat());
2629 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2630 // Don't fold globals with redzones. The ODR violation detector and redzone
2631 // poisoning implicitly create a dependence on the global's address, so it
2632 // is no longer valid for it to be marked unnamed_addr.
2633 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2634
2635 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2636 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2637 G->isConstant()) {
2638 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2639 if (Seq && Seq->isCString())
2640 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2641 }
2642
2643 // Transfer the debug info and type metadata. The payload starts at offset
2644 // zero so we can copy the metadata over as is.
2645 NewGlobal->copyMetadata(G, 0);
2646
2647 Value *Indices2[2];
2648 Indices2[0] = IRB.getInt32(0);
2649 Indices2[1] = IRB.getInt32(0);
2650
2651 G->replaceAllUsesWith(
2652     ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2653 NewGlobal->takeName(G);
2654 G->eraseFromParent();
2655 NewGlobals[i] = NewGlobal;
2656
2657 Constant *ODRIndicator = Constant::getNullValue(IntptrTy);
2658 GlobalValue *InstrumentedGlobal = NewGlobal;
2659
2660 bool CanUsePrivateAliases =
2661 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2662 TargetTriple.isOSBinFormatWasm();
2663 if (CanUsePrivateAliases && UsePrivateAlias) {
2664 // Create local alias for NewGlobal to avoid crash on ODR between
2665 // instrumented and non-instrumented libraries.
2666 InstrumentedGlobal =
2667     GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2668 }
2669
2670 // ODR should not happen for local linkage.
2671 if (NewGlobal->hasLocalLinkage()) {
2672 ODRIndicator = ConstantInt::get(IntptrTy, -1);
2673 } else if (UseOdrIndicator) {
2674 // With local aliases, we need to provide another externally visible
2675 // symbol __odr_asan_XXX to detect ODR violations.
2676 auto *ODRIndicatorSym =
2677 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2678                    UndefValue::get(IRB.getInt8Ty()),
2679                    kODRGenPrefix + NameForGlobal, nullptr,
2680 NewGlobal->getThreadLocalMode());
2681
2682 // Set meaningful attributes for indicator symbol.
2683 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2684 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2685 ODRIndicatorSym->setAlignment(Align(1));
2686 ODRIndicator = ConstantExpr::getPtrToInt(ODRIndicatorSym, IntptrTy);
2687 }
2688
2689 Constant *Initializer = ConstantStruct::get(
2690 GlobalStructTy,
2691 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2692 ConstantInt::get(IntptrTy, SizeInBytes),
2693 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2694 ConstantExpr::getPointerCast(Name, IntptrTy),
2695 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
2696 ConstantInt::get(IntptrTy, MD.IsDynInit),
2697 Constant::getNullValue(IntptrTy), ODRIndicator);
2698
2699 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2700
2701 Initializers[i] = Initializer;
2702 }
2703
2704 // Add instrumented globals to llvm.compiler.used list to avoid LTO from
2705 // ConstantMerge'ing them.
2706 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2707 for (size_t i = 0; i < n; i++) {
2708 GlobalVariable *G = NewGlobals[i];
2709 if (G->getName().empty()) continue;
2710 GlobalsToAddToUsedList.push_back(G);
2711 }
2712 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2713
2714 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2715 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2716 // linkage unit will only have one module constructor, and (b) the register
2717 // function will be called. The module destructor is not created when n ==
2718 // 0.
2719 *CtorComdat = true;
2720 instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
2721 } else if (n == 0) {
2722 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2723 // all compile units will have identical module constructor/destructor.
2724 *CtorComdat = TargetTriple.isOSBinFormatELF();
2725 } else {
2726 *CtorComdat = false;
2727 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2728 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2729 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2730 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2731 } else {
2732 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2733 }
2734 }
2735
2736 // Create calls for poisoning before initializers run and unpoisoning after.
2737 if (ClInitializers)
2738 createInitializerPoisonCalls();
2739
2740 LLVM_DEBUG(dbgs() << M);
2741}
2742
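// Worked examples, assuming the minimum redzone MinRZ is 32 bytes: a
// 4-byte global gets RZ = 32 - 4 = 28, i.e. a padded size of exactly 32;
// a 100-byte global gets RZ = clamp(0, 32, 256K) = 32 plus 28 more to
// round 100 up to a multiple of 32, i.e. RZ = 60 and a padded size of 160.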
2743uint64_t
2744ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2745 constexpr uint64_t kMaxRZ = 1 << 18;
2746 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2747
2748 uint64_t RZ = 0;
2749 if (SizeInBytes <= MinRZ / 2) {
2750 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2751 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2752 // half of MinRZ.
2753 RZ = MinRZ - SizeInBytes;
2754 } else {
2755 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2756 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2757
2758 // Round up to multiple of MinRZ.
2759 if (SizeInBytes % MinRZ)
2760 RZ += MinRZ - (SizeInBytes % MinRZ);
2761 }
2762
2763 assert((RZ + SizeInBytes) % MinRZ == 0);
2764
2765 return RZ;
2766}
2767
2768int ModuleAddressSanitizer::GetAsanVersion() const {
2769 int LongSize = M.getDataLayout().getPointerSizeInBits();
2770 bool isAndroid = M.getTargetTriple().isAndroid();
2771 int Version = 8;
2772 // 32-bit Android is one version ahead because of the switch to dynamic
2773 // shadow.
2774 Version += (LongSize == 32 && isAndroid);
2775 return Version;
2776}
2777
2778GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2779 if (!ModuleName) {
2780 // We shouldn't merge identical module names, as this string serves as a
2781 // unique module ID in the runtime.
2782 ModuleName =
2783 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2784 /*AllowMerging*/ false, genName("module"));
2785 }
2786 return ModuleName;
2787}
2788
2789bool ModuleAddressSanitizer::instrumentModule() {
2790 initializeCallbacks();
2791
2792 for (Function &F : M)
2793 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);
2794
2795 // Create a module constructor. A destructor is created lazily because not
2796 // all platforms and not all modules need it.
2797 if (ConstructorKind == AsanCtorKind::Global) {
2798 if (CompileKernel) {
2799 // The kernel always builds with its own runtime, and therefore does not
2800 // need the init and version check calls.
2801 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2802 } else {
2803 std::string AsanVersion = std::to_string(GetAsanVersion());
2804 std::string VersionCheckName =
2805 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2806 std::tie(AsanCtorFunction, std::ignore) =
2807     createSanitizerCtorAndInitFunctions(
2808         M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
2809 /*InitArgs=*/{}, VersionCheckName);
2810 }
2811 }
2812
2813 bool CtorComdat = true;
2814 if (ClGlobals) {
2815 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2816 if (AsanCtorFunction) {
2817 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2818 instrumentGlobals(IRB, &CtorComdat);
2819 } else {
2820 IRBuilder<> IRB(*C);
2821 instrumentGlobals(IRB, &CtorComdat);
2822 }
2823 }
2824
2825 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2826
2827 // Put the constructor and destructor in comdat if both
2828 // (1) global instrumentation is not TU-specific and
2829 // (2) the target is ELF.
2830 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2831 if (AsanCtorFunction) {
2832 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2833 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2834 }
2835 if (AsanDtorFunction) {
2836 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2837 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2838 }
2839 } else {
2840 if (AsanCtorFunction)
2841 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2842 if (AsanDtorFunction)
2843 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2844 }
2845
2846 return true;
2847}
2848
2849void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
2850 IRBuilder<> IRB(*C);
2851 // Create __asan_report* callbacks.
2852 // IsWrite, TypeSize and Exp are encoded in the function name.
2853 for (int Exp = 0; Exp < 2; Exp++) {
2854 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2855 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2856 const std::string ExpStr = Exp ? "exp_" : "";
2857 const std::string EndingStr = Recover ? "_noabort" : "";
2858
2859 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2860 SmallVector<Type *, 2> Args1{1, IntptrTy};
2861 AttributeList AL2;
2862 AttributeList AL1;
2863 if (Exp) {
2864 Type *ExpType = Type::getInt32Ty(*C);
2865 Args2.push_back(ExpType);
2866 Args1.push_back(ExpType);
2867 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2868 AL2 = AL2.addParamAttribute(*C, 2, AK);
2869 AL1 = AL1.addParamAttribute(*C, 1, AK);
2870 }
2871 }
2872 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2873 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2874 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2875
2876 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2877 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2878 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2879
2880 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2881 AccessSizeIndex++) {
2882 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2883 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2884 M.getOrInsertFunction(
2885 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2886 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2887
2888 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2889 M.getOrInsertFunction(
2890 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2891 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2892 }
2893 }
2894 }
2895
2896 const std::string MemIntrinCallbackPrefix =
2897 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2898 ? std::string("")
2899     : ClMemoryAccessCallbackPrefix;
2900 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2901 PtrTy, PtrTy, PtrTy, IntptrTy);
2902 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2903 PtrTy, PtrTy, IntptrTy);
2904 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2905 TLI->getAttrList(C, {1}, /*Signed=*/false),
2906 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2907
2908 AsanHandleNoReturnFunc =
2909 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2910
2911 AsanPtrCmpFunction =
2912 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2913 AsanPtrSubFunction =
2914 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2915 if (Mapping.InGlobal)
2916 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2917 ArrayType::get(IRB.getInt8Ty(), 0));
2918
2919 AMDGPUAddressShared =
2920 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2921 AMDGPUAddressPrivate =
2922 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2923}
2924
2925bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2926 // For each NSObject descendant having a +load method, this method is invoked
2927 // by the ObjC runtime before any of the static constructors is called.
2928 // Therefore we need to instrument such methods with a call to __asan_init
2929 // at the beginning in order to initialize our runtime before any access to
2930 // the shadow memory.
2931 // We cannot just ignore these methods, because they may call other
2932 // instrumented functions.
2933 if (F.getName().contains(" load]")) {
2934 FunctionCallee AsanInitFunction =
2935 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2936 IRBuilder<> IRB(&F.front(), F.front().begin());
2937 IRB.CreateCall(AsanInitFunction, {});
2938 return true;
2939 }
2940 return false;
2941}
2942
2943bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2944 // Generate code only when dynamic addressing is needed.
2945 if (Mapping.Offset != kDynamicShadowSentinel)
2946 return false;
2947
2948 IRBuilder<> IRB(&F.front().front());
2949 if (Mapping.InGlobal) {
2950 if (ClWithIfuncSuppressRemat) {
2951   // An empty inline asm with input reg == output reg.
2952   // An opaque pointer-to-int cast, basically.
2953   InlineAsm *Asm = InlineAsm::get(
2954       FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2955 StringRef(""), StringRef("=r,0"),
2956 /*hasSideEffects=*/false);
2957 LocalDynamicShadow =
2958 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2959 } else {
2960 LocalDynamicShadow =
2961 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2962 }
2963 } else {
2964 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2965     kAsanShadowMemoryDynamicAddress, IntptrTy);
2966 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2967 }
2968 return true;
2969}
2970
2971void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2972 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2973 // to it as uninteresting. This assumes we haven't started processing allocas
2974 // yet. This check is done up front because iterating the use list in
2975 // isInterestingAlloca would be algorithmically slower.
2976 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2977
2978 // Try to get the declaration of llvm.localescape. If it's not in the module,
2979 // we can exit early.
2980 if (!F.getParent()->getFunction("llvm.localescape")) return;
2981
2982 // Look for a call to llvm.localescape in the entry block. It can't be in
2983 // any other block.
2984 for (Instruction &I : F.getEntryBlock()) {
2985 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2986 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2987 // We found a call. Mark all the allocas passed in as uninteresting.
2988 for (Value *Arg : II->args()) {
2989 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2990 assert(AI && AI->isStaticAlloca() &&
2991 "non-static alloca arg to localescape");
2992 ProcessedAllocas[AI] = false;
2993 }
2994 break;
2995 }
2996 }
2997}
2998// Mitigation for https://github.com/google/sanitizers/issues/749
2999// We don't instrument Windows catch-block parameters to avoid
3000// interfering with exception handling assumptions.
3001void AddressSanitizer::markCatchParametersAsUninteresting(Function &F) {
3002 for (BasicBlock &BB : F) {
3003 for (Instruction &I : BB) {
3004 if (auto *CatchPad = dyn_cast<CatchPadInst>(&I)) {
3005 // Mark the parameters to a catch-block as uninteresting to avoid
3006 // instrumenting them.
3007 for (Value *Operand : CatchPad->arg_operands())
3008 if (auto *AI = dyn_cast<AllocaInst>(Operand))
3009 ProcessedAllocas[AI] = false;
3010 }
3011 }
3012 }
3013}
3014
3015bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
3016 bool ShouldInstrument =
3017 ClDebugMin < 0 || ClDebugMax < 0 ||
3018 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
3019 Instrumented++;
3020 return !ShouldInstrument;
3021}
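// Together with -asan-debug-min/-asan-debug-max (both default to -1, which
// instruments everything) this allows bisecting a miscompile down to a single
// check site, e.g.
//   -mllvm -asan-debug-min=42 -mllvm -asan-debug-max=42
// instruments only the site with counter value 42.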
3022
3023bool AddressSanitizer::instrumentFunction(Function &F,
3024 const TargetLibraryInfo *TLI,
3025 const TargetTransformInfo *TTI) {
3026 bool FunctionModified = false;
3027
3028 // Do not apply any instrumentation for naked functions.
3029 if (F.hasFnAttribute(Attribute::Naked))
3030 return FunctionModified;
3031
3032 // If needed, insert __asan_init before checking for SanitizeAddress attr.
3033 // This function needs to be called even if the function body is not
3034 // instrumented.
3035 if (maybeInsertAsanInitAtFunctionEntry(F))
3036 FunctionModified = true;
3037
3038 // Leave if the function doesn't need instrumentation.
3039 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
3040
3041 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3042 return FunctionModified;
3043
3044 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
3045
3046 initializeCallbacks(TLI);
3047
3048 FunctionStateRAII CleanupObj(this);
3049
3050 RuntimeCallInserter RTCI(F);
3051
3052 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
3053
3054 // We can't instrument allocas used with llvm.localescape. Only static allocas
3055 // can be passed to that intrinsic.
3056 markEscapedLocalAllocas(F);
3057
3058 if (TargetTriple.isOSWindows())
3059 markCatchParametersAsUninteresting(F);
3060
3061 // We want to instrument every address only once per basic block (unless there
3062 // are calls between uses).
3063 SmallPtrSet<Value *, 16> TempsToInstrument;
3064 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
3065 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
3066 SmallVector<Instruction *, 8> NoReturnCalls;
3067 SmallVector<BasicBlock *, 16> AllBlocks;
3068 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
3069
3070 // Fill the set of memory operations to instrument.
3071 for (auto &BB : F) {
3072 AllBlocks.push_back(&BB);
3073 TempsToInstrument.clear();
3074 int NumInsnsPerBB = 0;
3075 for (auto &Inst : BB) {
3076 if (LooksLikeCodeInBug11395(&Inst)) return false;
3077 // Skip instructions inserted by another instrumentation.
3078 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3079 continue;
3080 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3081 getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);
3082
3083 if (!InterestingOperands.empty()) {
3084 for (auto &Operand : InterestingOperands) {
3085 if (ClOpt && ClOptSameTemp) {
3086 Value *Ptr = Operand.getPtr();
3087 // If we have a mask, skip instrumentation if we've already
3088 // instrumented the full object. But don't add to TempsToInstrument
3089 // because we might get another load/store with a different mask.
3090 if (Operand.MaybeMask) {
3091 if (TempsToInstrument.count(Ptr))
3092 continue; // We've seen this (whole) temp in the current BB.
3093 } else {
3094 if (!TempsToInstrument.insert(Ptr).second)
3095 continue; // We've seen this temp in the current BB.
3096 }
3097 }
3098 OperandsToInstrument.push_back(Operand);
3099 NumInsnsPerBB++;
3100 }
3101 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3102 isInterestingPointerComparison(&Inst)) ||
3103 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3104 isInterestingPointerSubtraction(&Inst))) {
3105 PointerComparisonsOrSubtracts.push_back(&Inst);
3106 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3107 // ok, take it.
3108 IntrinToInstrument.push_back(MI);
3109 NumInsnsPerBB++;
3110 } else {
3111 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3112 // A call inside BB.
3113 TempsToInstrument.clear();
3114 if (CB->doesNotReturn())
3115 NoReturnCalls.push_back(CB);
3116 }
3117 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3118 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3119 }
3120 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3121 }
3122 }
3123
3124 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3125 OperandsToInstrument.size() + IntrinToInstrument.size() >
3126 (unsigned)InstrumentationWithCallsThreshold);
3127 const DataLayout &DL = F.getDataLayout();
3128 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());
3129
3130 // Instrument.
3131 int NumInstrumented = 0;
3132 for (auto &Operand : OperandsToInstrument) {
3133 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3134 instrumentMop(ObjSizeVis, Operand, UseCalls,
3135 F.getDataLayout(), RTCI);
3136 FunctionModified = true;
3137 }
3138 for (auto *Inst : IntrinToInstrument) {
3139 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3140 instrumentMemIntrinsic(Inst, RTCI);
3141 FunctionModified = true;
3142 }
3143
3144 FunctionStackPoisoner FSP(F, *this, RTCI);
3145 bool ChangedStack = FSP.runOnFunction();
3146
3147 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3148 // See e.g. https://github.com/google/sanitizers/issues/37
3149 for (auto *CI : NoReturnCalls) {
3150 IRBuilder<> IRB(CI);
3151 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3152 }
3153
3154 for (auto *Inst : PointerComparisonsOrSubtracts) {
3155 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3156 FunctionModified = true;
3157 }
3158
3159 if (ChangedStack || !NoReturnCalls.empty())
3160 FunctionModified = true;
3161
3162 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3163 << F << "\n");
3164
3165 return FunctionModified;
3166}
3167
3168// Workaround for bug 11395: we don't want to instrument stack in functions
3169// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
3170// FIXME: remove once the bug 11395 is fixed.
3171bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3172 if (LongSize != 32) return false;
3173 CallInst *CI = dyn_cast<CallInst>(I);
3174 if (!CI || !CI->isInlineAsm()) return false;
3175 if (CI->arg_size() <= 5)
3176 return false;
3177 // We have inline assembly with quite a few arguments.
3178 return true;
3179}
3180
3181void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3182 IRBuilder<> IRB(*C);
3183 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3184 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3185 const char *MallocNameTemplate =
3186 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3187 ? kAsanStackMallocAlwaysNameTemplate
3188 : kAsanStackMallocNameTemplate;
3189 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3190 std::string Suffix = itostr(Index);
3191 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3192 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3193 AsanStackFreeFunc[Index] =
3194 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3195 IRB.getVoidTy(), IntptrTy, IntptrTy);
3196 }
3197 }
3198 if (ASan.UseAfterScope) {
3199 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3200 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3201 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3202 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3203 }
3204
3205 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3206 0xf3, 0xf5, 0xf8}) {
3207 std::ostringstream Name;
3208 Name << kAsanSetShadowPrefix;
3209 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3210 AsanSetShadowFunc[Val] =
3211 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3212 }
3213
3214 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3215 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3216 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3217 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3218}
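// The names produced here are real runtime entry points, e.g.
// __asan_stack_malloc_3 / __asan_stack_free_3 for size class 3, and
// __asan_set_shadow_f8 for runs of the 0xf8 (use-after-scope) shadow value.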
3219
3220void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3221 ArrayRef<uint8_t> ShadowBytes,
3222 size_t Begin, size_t End,
3223 IRBuilder<> &IRB,
3224 Value *ShadowBase) {
3225 if (Begin >= End)
3226 return;
3227
3228 const size_t LargestStoreSizeInBytes =
3229 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3230
3231 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3232
3233 // Poison the given range in shadow using the largest store size, after
3234 // trimming leading and trailing zeros in ShadowMask. Zeros never change, so
3235 // they need neither poisoning nor unpoisoning. Still, we don't mind if some
3236 // of them end up in the middle of a store.
3237 for (size_t i = Begin; i < End;) {
3238 if (!ShadowMask[i]) {
3239 assert(!ShadowBytes[i]);
3240 ++i;
3241 continue;
3242 }
3243
3244 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3245 // Fit store size into the range.
3246 while (StoreSizeInBytes > End - i)
3247 StoreSizeInBytes /= 2;
3248
3249 // Minimize store size by trimming trailing zeros.
3250 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3251 while (j <= StoreSizeInBytes / 2)
3252 StoreSizeInBytes /= 2;
3253 }
3254
3255 uint64_t Val = 0;
3256 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3257 if (IsLittleEndian)
3258 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3259 else
3260 Val = (Val << 8) | ShadowBytes[i + j];
3261 }
3262
3263 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3264 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3265 IRB.CreateAlignedStore(
3266 Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
3267 Align(1));
3268
3269 i += StoreSizeInBytes;
3270 }
3271}
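// Worked example: on a little-endian 64-bit target, the eight shadow bytes
// f1 f1 f1 f1 00 00 02 f3 are emitted as one unaligned i64 store of
// 0xf3020000f1f1f1f1; the two interior zeros are harmlessly rewritten.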
3272
3273void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3274 ArrayRef<uint8_t> ShadowBytes,
3275 IRBuilder<> &IRB, Value *ShadowBase) {
3276 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3277}
3278
3279void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3280 ArrayRef<uint8_t> ShadowBytes,
3281 size_t Begin, size_t End,
3282 IRBuilder<> &IRB, Value *ShadowBase) {
3283 assert(ShadowMask.size() == ShadowBytes.size());
3284 size_t Done = Begin;
3285 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3286 if (!ShadowMask[i]) {
3287 assert(!ShadowBytes[i]);
3288 continue;
3289 }
3290 uint8_t Val = ShadowBytes[i];
3291 if (!AsanSetShadowFunc[Val])
3292 continue;
3293
3294 // Skip same values.
3295 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3296 }
3297
3298 if (j - i >= ASan.MaxInlinePoisoningSize) {
3299 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3300 RTCI.createRuntimeCall(
3301 IRB, AsanSetShadowFunc[Val],
3302 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3303 ConstantInt::get(IntptrTy, j - i)});
3304 Done = j;
3305 }
3306 }
3307
3308 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3309}
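// For example, a run of 200 identical 0xf8 bytes (with MaxInlinePoisoningSize
// at its default of 64) becomes a single call
//   __asan_set_shadow_f8(ShadowBase + i, 200)
// while shorter runs on either side are emitted inline by copyToShadowInline.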
3310
3311// The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
3312// every power of 2 from kMinStackMallocSize up to kMaxStackMallocSize.
3313static int StackMallocSizeClass(uint64_t LocalStackSize) {
3314 assert(LocalStackSize <= kMaxStackMallocSize);
3315 uint64_t MaxSize = kMinStackMallocSize;
3316 for (int i = 0;; i++, MaxSize *= 2)
3317 if (LocalStackSize <= MaxSize) return i;
3318 llvm_unreachable("impossible LocalStackSize");
3319}
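// E.g. LocalStackSize = 64 maps to class 0, 65..128 to class 1, and so on up
// to 65536 (kMaxStackMallocSize) in class 10, giving the 11 classes in total.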
3320
3321void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3322 Instruction *CopyInsertPoint = &F.front().front();
3323 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3324 // Insert after the dynamic shadow location is determined
3325 CopyInsertPoint = CopyInsertPoint->getNextNode();
3326 assert(CopyInsertPoint);
3327 }
3328 IRBuilder<> IRB(CopyInsertPoint);
3329 const DataLayout &DL = F.getDataLayout();
3330 for (Argument &Arg : F.args()) {
3331 if (Arg.hasByValAttr()) {
3332 Type *Ty = Arg.getParamByValType();
3333 const Align Alignment =
3334 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3335
3336 AllocaInst *AI = IRB.CreateAlloca(
3337 Ty, nullptr,
3338 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3339 ".byval");
3340 AI->setAlignment(Alignment);
3341 Arg.replaceAllUsesWith(AI);
3342
3343 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3344 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3345 }
3346 }
3347}
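// A sketch of the rewrite for a byval argument (IR abbreviated, a 24-byte
// %struct.S assumed):
//   define void @f(ptr byval(%struct.S) align 8 %s)
// gains an entry-block copy
//   %s.byval = alloca %struct.S, align 8
//   call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s.byval, ptr align 8 %s, i64 24, i1 false)
// and all uses of %s are redirected to %s.byval, which can then receive
// redzones like any other interesting alloca.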
3348
3349PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3350 Value *ValueIfTrue,
3351 Instruction *ThenTerm,
3352 Value *ValueIfFalse) {
3353 PHINode *PHI = IRB.CreatePHI(ValueIfTrue->getType(), 2);
3354 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3355 PHI->addIncoming(ValueIfFalse, CondBlock);
3356 BasicBlock *ThenBlock = ThenTerm->getParent();
3357 PHI->addIncoming(ValueIfTrue, ThenBlock);
3358 return PHI;
3359}
3360
3361Value *FunctionStackPoisoner::createAllocaForLayout(
3362 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3363 AllocaInst *Alloca;
3364 if (Dynamic) {
3365 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3366 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3367 "MyAlloca");
3368 } else {
3369 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3370 nullptr, "MyAlloca");
3371 assert(Alloca->isStaticAlloca());
3372 }
3373 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3374 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3375 Alloca->setAlignment(Align(FrameAlignment));
3376 return Alloca;
3377}
3378
3379void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3380 BasicBlock &FirstBB = *F.begin();
3381 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3382 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3383 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3384 DynamicAllocaLayout->setAlignment(Align(32));
3385}
3386
3387void FunctionStackPoisoner::processDynamicAllocas() {
3388 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3389 assert(DynamicAllocaPoisonCallVec.empty());
3390 return;
3391 }
3392
3393 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3394 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3395 assert(APC.InsBefore);
3396 assert(APC.AI);
3397 assert(ASan.isInterestingAlloca(*APC.AI));
3398 assert(!APC.AI->isStaticAlloca());
3399
3400 IRBuilder<> IRB(APC.InsBefore);
3401 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3402 // Dynamic allocas will be unpoisoned unconditionally below in
3403 // unpoisonDynamicAllocas.
3404 // Flag that we need to unpoison static allocas.
3405 }
3406
3407 // Handle dynamic allocas.
3408 createDynamicAllocasInitStorage();
3409 for (auto &AI : DynamicAllocaVec)
3410 handleDynamicAllocaCall(AI);
3411 unpoisonDynamicAllocas();
3412}
3413
3414/// Collect instructions in the entry block after \p InsBefore which initialize
3415/// permanent storage for a function argument. These instructions must remain in
3416/// the entry block so that uninitialized values do not appear in backtraces. An
3417/// added benefit is that this conserves spill slots. This does not move stores
3418/// before instrumented / "interesting" allocas.
3419static void findStoresToUninstrumentedArgAllocas(
3420 AddressSanitizer &ASan, Instruction &InsBefore,
3421 SmallVectorImpl<Instruction *> &InitInsts) {
3422 Instruction *Start = InsBefore.getNextNode();
3423 for (Instruction *It = Start; It; It = It->getNextNode()) {
3424 // Argument initialization looks like:
3425 // 1) store <Argument>, <Alloca> OR
3426 // 2) <CastArgument> = cast <Argument> to ...
3427 // store <CastArgument> to <Alloca>
3428 // Do not consider any other kind of instruction.
3429 //
3430 // Note: This covers all known cases, but may not be exhaustive. An
3431 // alternative to pattern-matching stores is to DFS over all Argument uses:
3432 // this might be more general, but is probably much more complicated.
3433 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3434 continue;
3435 if (auto *Store = dyn_cast<StoreInst>(It)) {
3436 // The store destination must be an alloca that isn't interesting for
3437 // ASan to instrument. These are moved up before InsBefore, and they're
3438 // not interesting because allocas for arguments can be mem2reg'd.
3439 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3440 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3441 continue;
3442
3443 Value *Val = Store->getValueOperand();
3444 bool IsDirectArgInit = isa<Argument>(Val);
3445 bool IsArgInitViaCast =
3446 isa<CastInst>(Val) &&
3447 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3448 // Check that the cast appears directly before the store. Otherwise
3449 // moving the cast before InsBefore may break the IR.
3450 Val == It->getPrevNode();
3451 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3452 if (!IsArgInit)
3453 continue;
3454
3455 if (IsArgInitViaCast)
3456 InitInsts.push_back(cast<Instruction>(Val));
3457 InitInsts.push_back(Store);
3458 continue;
3459 }
3460
3461 // Do not reorder past unknown instructions: argument initialization should
3462 // only involve casts and stores.
3463 return;
3464 }
3465}
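// The two accepted patterns, in IR form (names illustrative):
//   store i32 %argc, ptr %argc.addr          ; (1) direct store of an argument
//   %frombool = zext i1 %b to i8
//   store i8 %frombool, ptr %b.addr          ; (2) store via a preceding cast
// where %argc.addr and %b.addr are uninstrumented argument allocas.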
3466
3467static StringRef getAllocaName(AllocaInst *AI) {
3468 // Alloca could have been renamed for uniqueness. Its true name will have been
3469 // recorded as an annotation.
3470 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3471 MDTuple *AllocaAnnotations =
3472 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3473 for (auto &Annotation : AllocaAnnotations->operands()) {
3474 if (!isa<MDTuple>(Annotation))
3475 continue;
3476 auto AnnotationTuple = cast<MDTuple>(Annotation);
3477 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3478 Index++) {
3479 // All annotations are strings
3480 auto MetadataString =
3481 cast<MDString>(AnnotationTuple->getOperand(Index));
3482 if (MetadataString->getString() == "alloca_name_altered")
3483 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3484 ->getString();
3485 }
3486 }
3487 }
3488 return AI->getName();
3489}
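// The metadata shape this walks looks roughly like (names hypothetical):
//   %x1 = alloca i32, !annotation !0
//   !0 = !{!1}
//   !1 = !{!"alloca_name_altered", !"x"}
// in which case "x" is returned instead of the uniqued IR name "x1".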
3490
3491void FunctionStackPoisoner::processStaticAllocas() {
3492 if (AllocaVec.empty()) {
3493 assert(StaticAllocaPoisonCallVec.empty());
3494 return;
3495 }
3496
3497 int StackMallocIdx = -1;
3498 DebugLoc EntryDebugLocation;
3499 if (auto SP = F.getSubprogram())
3500 EntryDebugLocation =
3501 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3502
3503 Instruction *InsBefore = AllocaVec[0];
3504 IRBuilder<> IRB(InsBefore);
3505
3506 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3507 // debug info is broken, because only entry-block allocas are treated as
3508 // regular stack slots.
3509 auto InsBeforeB = InsBefore->getParent();
3510 assert(InsBeforeB == &F.getEntryBlock());
3511 for (auto *AI : StaticAllocasToMoveUp)
3512 if (AI->getParent() == InsBeforeB)
3513 AI->moveBefore(InsBefore->getIterator());
3514
3515 // Move stores of arguments into entry-block allocas as well. This prevents
3516 // extra stack slots from being generated (to house the argument values until
3517 // they can be stored into the allocas). This also prevents uninitialized
3518 // values from being shown in backtraces.
3519 SmallVector<Instruction *, 8> ArgInitInsts;
3520 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3521 for (Instruction *ArgInitInst : ArgInitInsts)
3522 ArgInitInst->moveBefore(InsBefore->getIterator());
3523
3524 // If we have a call to llvm.localescape, keep it in the entry block.
3525 if (LocalEscapeCall)
3526 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3527
3528 SmallVector<ASanStackVariableDescription, 16> SVD;
3529 SVD.reserve(AllocaVec.size());
3530 for (AllocaInst *AI : AllocaVec) {
3531 StringRef Name = getAllocaName(AI);
3532 ASanStackVariableDescription D = {Name,
3533 ASan.getAllocaSizeInBytes(*AI),
3534 0,
3535 AI->getAlign().value(),
3536 AI,
3537 0,
3538 0};
3539 SVD.push_back(D);
3540 }
3541
3542 // Minimal header size (left redzone) is 4 pointers,
3543 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3544 uint64_t Granularity = 1ULL << Mapping.Scale;
3545 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3546 const ASanStackFrameLayout &L =
3547 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3548
3549 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3550 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3551 for (auto &Desc : SVD)
3552 AllocaToSVDMap[Desc.AI] = &Desc;
3553
3554 // Update SVD with information from lifetime intrinsics.
3555 for (const auto &APC : StaticAllocaPoisonCallVec) {
3556 assert(APC.InsBefore);
3557 assert(APC.AI);
3558 assert(ASan.isInterestingAlloca(*APC.AI));
3559 assert(APC.AI->isStaticAlloca());
3560
3561 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3562 Desc.LifetimeSize = Desc.Size;
3563 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3564 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3565 if (LifetimeLoc->getFile() == FnLoc->getFile())
3566 if (unsigned Line = LifetimeLoc->getLine())
3567 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3568 }
3569 }
3570 }
3571
3572 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3573 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3574 uint64_t LocalStackSize = L.FrameSize;
3575 bool DoStackMalloc =
3576 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3577 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3578 bool DoDynamicAlloca = ClDynamicAllocaStack;
3579 // Don't do dynamic alloca or stack malloc if:
3580 // 1) There is inline asm: too often it makes assumptions on which registers
3581 // are available.
3582 // 2) There is a returns_twice call (typically setjmp), which is
3583 // optimization-hostile, and doesn't play well with introduced indirect
3584 // register-relative calculation of local variable addresses.
3585 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3586 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3587
3588 Type *PtrTy = F.getDataLayout().getAllocaPtrType(F.getContext());
3589 Value *StaticAlloca =
3590 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3591
3592 Value *FakeStackPtr;
3593 Value *FakeStackInt;
3594 Value *LocalStackBase;
3595 Value *LocalStackBaseAlloca;
3596 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3597
3598 if (DoStackMalloc) {
3599 LocalStackBaseAlloca =
3600 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3601 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3602 // void *FakeStack = __asan_option_detect_stack_use_after_return
3603 // ? __asan_stack_malloc_N(LocalStackSize)
3604 // : nullptr;
3605 // void *LocalStackBase = (FakeStack) ? FakeStack :
3606 // alloca(LocalStackSize);
3607 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3608 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3609 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3610 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3611 Constant::getNullValue(IRB.getInt32Ty()));
3612 Instruction *Term =
3613 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3614 IRBuilder<> IRBIf(Term);
3615 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3616 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3617 Value *FakeStackValue =
3618 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3619 ConstantInt::get(IntptrTy, LocalStackSize));
3620 IRB.SetInsertPoint(InsBefore);
3621 FakeStackInt = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue,
3622 Term, ConstantInt::get(IntptrTy, 0));
3623 } else {
3624 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3625 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3626 // void *LocalStackBase = (FakeStack) ? FakeStack :
3627 // alloca(LocalStackSize);
3628 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3629 FakeStackInt =
3630 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3631 ConstantInt::get(IntptrTy, LocalStackSize));
3632 }
3633 FakeStackPtr = IRB.CreateIntToPtr(FakeStackInt, PtrTy);
3634 Value *NoFakeStack =
3635 IRB.CreateICmpEQ(FakeStackInt, Constant::getNullValue(IntptrTy));
3636 Instruction *Term =
3637 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3638 IRBuilder<> IRBIf(Term);
3639 Value *AllocaValue =
3640 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3641
3642 IRB.SetInsertPoint(InsBefore);
3643 LocalStackBase =
3644 createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStackPtr);
3645 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3646 DIExprFlags |= DIExpression::DerefBefore;
3647 } else {
3648 // void *FakeStack = nullptr;
3649 // void *LocalStackBase = alloca(LocalStackSize);
3650 FakeStackInt = Constant::getNullValue(IntptrTy);
3651 FakeStackPtr = Constant::getNullValue(PtrTy);
3652 LocalStackBase =
3653 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3654 LocalStackBaseAlloca = LocalStackBase;
3655 }
3656
3657 // Replace Alloca instructions with base+offset.
3658 SmallVector<Value *> NewAllocaPtrs;
3659 for (const auto &Desc : SVD) {
3660 AllocaInst *AI = Desc.AI;
3661 replaceDbgDeclare(AI, LocalStackBaseAlloca, DIB, DIExprFlags, Desc.Offset);
3662 Value *NewAllocaPtr = IRB.CreatePtrAdd(
3663 LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset));
3664 AI->replaceAllUsesWith(NewAllocaPtr);
3665 NewAllocaPtrs.push_back(NewAllocaPtr);
3666 }
3667
3668 // The left-most redzone has enough space for at least 4 pointers.
3669 // Write the Magic value to redzone[0].
3670 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3671 LocalStackBase);
3672 // Write the frame description constant to redzone[1].
3673 Value *BasePlus1 = IRB.CreatePtrAdd(
3674 LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize / 8));
3675 GlobalVariable *StackDescriptionGlobal =
3676 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3677 /*AllowMerging*/ true, genName("stack"));
3678 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3679 IRB.CreateStore(Description, BasePlus1);
3680 // Write the PC to redzone[2].
3681 Value *BasePlus2 = IRB.CreatePtrAdd(
3682 LocalStackBase, ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8));
3683 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3684
3685 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3686
3687 // Poison the stack red zones at the entry.
3688 Value *ShadowBase =
3689 ASan.memToShadow(IRB.CreatePtrToInt(LocalStackBase, IntptrTy), IRB);
3690 // As the mask we must use the most poisoned case: red zones and after-scope
3691 // bytes. As the bytes we can use either the same or the red zones only.
3692 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3693
3694 if (!StaticAllocaPoisonCallVec.empty()) {
3695 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3696
3697 // Poison static allocas near lifetime intrinsics.
3698 for (const auto &APC : StaticAllocaPoisonCallVec) {
3699 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3700 assert(Desc.Offset % L.Granularity == 0);
3701 size_t Begin = Desc.Offset / L.Granularity;
3702 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3703
3704 IRBuilder<> IRB(APC.InsBefore);
3705 copyToShadow(ShadowAfterScope,
3706 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3707 IRB, ShadowBase);
3708 }
3709 }
3710
3711 // Remove lifetime markers now that these are no longer allocas.
3712 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3713 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3714 auto *I = cast<Instruction>(U);
3715 if (I->isLifetimeStartOrEnd())
3716 I->eraseFromParent();
3717 }
3718 }
3719
3720 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3721 SmallVector<uint8_t, 64> ShadowAfterReturn;
3722
3723 // (Un)poison the stack before all ret instructions.
3724 for (Instruction *Ret : RetVec) {
3725 IRBuilder<> IRBRet(Ret);
3726 // Mark the current frame as retired.
3727 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3728 LocalStackBase);
3729 if (DoStackMalloc) {
3730 assert(StackMallocIdx >= 0);
3731 // if FakeStack != 0 // LocalStackBase == FakeStack
3732 // // In use-after-return mode, poison the whole stack frame.
3733 // if StackMallocIdx <= 4
3734 // // For small sizes inline the whole thing:
3735 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3736 // **SavedFlagPtr(FakeStack) = 0
3737 // else
3738 // __asan_stack_free_N(FakeStack, LocalStackSize)
3739 // else
3740 // <This is not a fake stack; unpoison the redzones>
3741 Value *Cmp =
3742 IRBRet.CreateICmpNE(FakeStackInt, Constant::getNullValue(IntptrTy));
3743 Instruction *ThenTerm, *ElseTerm;
3744 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3745
3746 IRBuilder<> IRBPoison(ThenTerm);
3747 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3748 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3749 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3750 kAsanStackUseAfterReturnMagic);
3751 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3752 ShadowBase);
3753 Value *SavedFlagPtrPtr = IRBPoison.CreatePtrAdd(
3754 FakeStackPtr,
3755 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3756 Value *SavedFlagPtr = IRBPoison.CreateLoad(IntptrTy, SavedFlagPtrPtr);
3757 IRBPoison.CreateStore(
3758 Constant::getNullValue(IRBPoison.getInt8Ty()),
3759 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3760 } else {
3761 // For larger frames call __asan_stack_free_*.
3762 RTCI.createRuntimeCall(
3763 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3764 {FakeStackInt, ConstantInt::get(IntptrTy, LocalStackSize)});
3765 }
3766
3767 IRBuilder<> IRBElse(ElseTerm);
3768 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3769 } else {
3770 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3771 }
3772 }
3773
3774 // We are done. Remove the old unused alloca instructions.
3775 for (auto *AI : AllocaVec)
3776 AI->eraseFromParent();
3777}
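// The three header words written above are [kCurrentStackFrameMagic, pointer
// to the frame description string, PC of the function]. A description string
// looks roughly like "2 32 10 4 buf1 64 20 4 buf2": the variable count, then
// offset, size, name length, and name for each variable.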
3778
3779void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3780 IRBuilder<> &IRB, bool DoPoison) {
3781 // For now, just insert the call to the ASan runtime.
3782 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3783 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3784 RTCI.createRuntimeCall(
3785 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3786 {AddrArg, SizeArg});
3787}
3788
3789// Handling llvm.lifetime intrinsics for a given %alloca:
3790// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3791// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3792// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3793// could be poisoned by previous llvm.lifetime.end instruction, as the
3794// variable may go in and out of scope several times, e.g. in loops).
3795// (3) if we poisoned at least one %alloca in a function,
3796// unpoison the whole stack frame at function exit.
3797void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3798 IRBuilder<> IRB(AI);
3799
3800 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3801 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3802
3803 Value *Zero = Constant::getNullValue(IntptrTy);
3804 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3805 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3806
3807 // Since we need to extend the alloca with additional memory to place the
3808 // redzones, and the alloca's array size is a number of elements of
3809 // ElementSize bytes each, compute the allocated memory size in bytes,
3810 // OldSize, as ArraySize * ElementSize.
3811 const unsigned ElementSize =
3812 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3813 Value *OldSize =
3814 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3815 ConstantInt::get(IntptrTy, ElementSize));
3816
3817 // PartialSize = OldSize % 32
3818 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3819
3820 // Misalign = kAllocaRzSize - PartialSize;
3821 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3822
3823 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3824 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3825 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3826
3827 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3828 // Alignment is added to locate left redzone, PartialPadding for possible
3829 // partial redzone and kAllocaRzSize for right redzone respectively.
3830 Value *AdditionalChunkSize = IRB.CreateAdd(
3831 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3832 PartialPadding);
3833
3834 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3835
3836 // Insert new alloca with new NewSize and Alignment params.
3837 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3838 NewAlloca->setAlignment(Alignment);
3839
3840 // NewAddress = Address + Alignment
3841 Value *NewAddress =
3842 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3843 ConstantInt::get(IntptrTy, Alignment.value()));
3844
3845 // Insert an __asan_alloca_poison call for the newly created alloca.
3846 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3847
3848 // Store the last alloca's address in DynamicAllocaLayout. We'll need it
3849 // to unpoison the dynamic allocas at function exit.
3850 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3851
3852 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3853
3854 // Remove lifetime markers now that this is no longer an alloca.
3855 for (User *U : make_early_inc_range(AI->users())) {
3856 auto *I = cast<Instruction>(U);
3857 if (I->isLifetimeStartOrEnd())
3858 I->eraseFromParent();
3859 }
3860
3861 // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3862 AI->replaceAllUsesWith(NewAddressPtr);
3863
3864 // We are done. Erase old alloca from parent.
3865 AI->eraseFromParent();
3866}
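// Worked example with kAllocaRzSize = 32, Alignment = 32 and OldSize = 100:
// PartialSize = 100 & 31 = 4, Misalign = 28, PartialPadding = 28, so
// NewSize = 100 + (32 + 32 + 28) = 192; the user data starts 32 bytes into
// the new alloca, leaving poisoned redzones on both sides.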
3867
3868// isSafeAccess returns true if Addr is always inbounds with respect to its
3869// base object. For example, it is a field access or an array access with
3870// constant inbounds index.
3871bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3872 Value *Addr, TypeSize TypeStoreSize) const {
3873 if (TypeStoreSize.isScalable())
3874 // TODO: We can use vscale_range to convert a scalable value to an
3875 // upper bound on the access size.
3876 return false;
3877
3878 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3879 if (!SizeOffset.bothKnown())
3880 return false;
3881
3882 uint64_t Size = SizeOffset.Size.getZExtValue();
3883 int64_t Offset = SizeOffset.Offset.getSExtValue();
3884
3885 // Three checks are required to ensure safety:
3886 // . Offset >= 0 (since the offset is given from the base ptr)
3887 // . Size >= Offset (unsigned)
3888 // . Size - Offset >= NeededSize (unsigned)
3889 return Offset >= 0 && Size >= uint64_t(Offset) &&
3890 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3891}
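// Worked example: a 4-byte store (TypeStoreSize = 32) to a field at offset 8
// of a 16-byte object gives Size = 16, Offset = 8; all three checks hold
// (8 >= 0, 16 >= 8, 16 - 8 >= 4), so the access is statically safe.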
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
#define G(x, y, z)
Definition MD5.cpp:56
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:270
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCannotMerge()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition Comdat.h:38
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1274
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:50
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition Function.cpp:380
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:598
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:214
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:275
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition Globals.cpp:553
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1833
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:547
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1867
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:687
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2254
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2360
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2202
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1513
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2039
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:567
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2336
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1926
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2497
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1808
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2332
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition IRBuilder.h:533
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition IRBuilder.h:1850
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1863
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2197
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition IRBuilder.h:2659
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2511
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2280
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:600
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1886
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:552
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2212
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
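Taken together, these builder calls are enough to express the classic shadow-address computation. The sketch below is illustrative only: Scale = 3 and Offset = 0x7fff8000 are common defaults rather than authoritative values, and IRB and Addr are assumed to be in scope.
// Shadow = (Addr >> Scale) + Offset; then load one shadow byte.
Value *AddrLong = IRB.CreatePtrToInt(Addr, IRB.getInt64Ty());
Value *ShadowInt = IRB.CreateAdd(IRB.CreateLShr(AddrLong, 3),
                                 IRB.getIntN(64, 0x7fff8000ULL));
Value *ShadowPtr =
    IRB.CreateIntToPtr(ShadowInt, PointerType::getUnqual(IRB.getInt8Ty()));
Value *ShadowByte =
    IRB.CreateAlignedLoad(IRB.getInt8Ty(), ShadowPtr, Align(1), "shadow");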
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
Definition MDBuilder.cpp:48
Metadata node.
Definition Metadata.h:1078
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
Tuple of metadata.
Definition Metadata.h:1497
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
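For context, the usual way a sanitizer pass combines these factories in its run method; a hedged sketch, where instrumentModule is a hypothetical helper rather than the pass's actual entry point:
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM) {
  bool Modified = instrumentModule(M); // hypothetical helper
  return Modified ? PreservedAnalyses::none() : PreservedAnalyses::all();
}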
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:414
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
EltTy front() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition Triple.h:917
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition Triple.h:601
bool isOSNetBSD() const
Definition Triple.h:631
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:826
bool isABIN32() const
Definition Triple.h:1142
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition Triple.h:1038
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:412
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition Triple.h:1027
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition Triple.h:1033
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:680
@ UnknownObjectFormat
Definition Triple.h:319
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition Triple.h:922
bool isOSLinux() const
Tests whether the OS is Linux.
Definition Triple.h:729
bool isAMDGPU() const
Definition Triple.h:914
bool isMacOSX() const
Is this a Mac OS X triple.
Definition Triple.h:567
bool isOSFreeBSD() const
Definition Triple.h:639
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition Triple.h:749
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition Triple.h:586
bool isiOS() const
Is this an iOS triple.
Definition Triple.h:576
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition Triple.h:823
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition Triple.h:1126
bool isOSFuchsia() const
Definition Triple.h:643
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition Triple.h:670
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:503
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
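A hedged sketch of how these shared helpers compose; M, I, and IRB are assumed to be the current Module, Instruction, and an IRBuilder<>, and AsanScale = 3 / AsanOffset = 0 are illustrative values:
SmallVector<InterestingMemoryOperand, 1> Interesting;
getInterestingMemoryOperands(M, &I, Interesting);
for (InterestingMemoryOperand &O : Interesting)
  instrumentAddress(M, IRB, &I, &I, O.getPtr(), O.Alignment.valueOrOne(),
                    O.TypeStoreSize, O.IsWrite, /*SizeArgument=*/nullptr,
                    /*UseCalls=*/false, /*Recover=*/false,
                    /*AsanScale=*/3, /*AsanOffset=*/0);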
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
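These cl helpers are how a pass typically declares its enum flags; a hedged sketch with an invented flag name, not an option the pass actually defines:
static cl::opt<AsanDtorKind> ClExampleDtorKind(
    "example-asan-dtor-kind", cl::desc("Kind of module destructor to emit"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "Emit no destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Append to llvm.global_dtors")),
    cl::init(AsanDtorKind::Global), cl::Hidden);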
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
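A brief hedged example of this helper; using the module identifier as the string and the "__asan_gen_" prefix mirrors sanitizer-pass convention but is shown only as an illustration:
GlobalVariable *ModuleNameGV = createPrivateGlobalForString(
    M, M.getModuleIdentifier(), /*AllowMerging=*/false, "__asan_gen_");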
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan stack use-after-return detection.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
Op::Description Desc
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
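A hedged sketch of the typical call; the symbol names follow the asan runtime convention and the ctor priority is illustrative:
auto [Ctor, InitFn] = createSanitizerCtorAndInitFunctions(
    M, "asan.module_ctor", "__asan_init",
    /*InitArgTypes=*/{}, /*InitArgs=*/{});
appendToGlobalCtors(M, Ctor, /*Priority=*/1);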
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classified by the "C" locale.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
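A hedged sketch of driving this layout computation; Granularity = 8 and MinHeaderSize = 32 reflect common shadow-granularity defaults and are shown only as an illustration:
SmallVector<ASanStackVariableDescription, 16> SVD;
// ...populate SVD from the function's instrumented allocas...
ASanStackFrameLayout Layout =
    ComputeASanStackFrameLayout(SVD, /*Granularity=*/8, /*MinHeaderSize=*/32);
SmallVector<uint8_t, 64> Shadow = GetShadowBytes(SVD, Layout);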
TargetTransformInfo TTI
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and HWAddressSanitizer.
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
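As an illustration of the shape this gives ASan-style checks, a hedged sketch in which Cmp, InsertPt, and Ctx are assumed to be in scope:
MDBuilder MDB(Ctx);
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
    Cmp, InsertPt, /*Unreachable=*/false, MDB.createUnlikelyBranchWeights());
IRBuilder<> ThenIRB(CheckTerm);
// ...emit the slow-path report call before CheckTerm...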
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition Local.cpp:3861
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
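A hedged usage sketch of this query; the variable names are illustrative and TargetTriple is assumed to be in scope:
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(TargetTriple, /*LongSize=*/64, /*IsKasan=*/false,
                          &ShadowBase, &MappingScale, &OrShadowOffset);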
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition Demangle.cpp:20
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces a dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1942
#define N
LLVM_ABI ASanAccessInfo(int32_t Packed)
const uint8_t AccessSizeIndex
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.