LLVM 23.0.0git
AddressSanitizer.cpp
Go to the documentation of this file.
1//===- AddressSanitizer.cpp - memory error detector -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address basic correctness
10// checker.
11// Details of the algorithm:
12// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
13//
14// FIXME: This sanitizer does not yet handle scalable vectors
15//
16//===----------------------------------------------------------------------===//
17
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
23#include "llvm/ADT/SmallSet.h"
25#include "llvm/ADT/Statistic.h"
27#include "llvm/ADT/StringRef.h"
28#include "llvm/ADT/Twine.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/Comdat.h"
41#include "llvm/IR/Constant.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DIBuilder.h"
44#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/DebugLoc.h"
49#include "llvm/IR/Function.h"
50#include "llvm/IR/GlobalAlias.h"
51#include "llvm/IR/GlobalValue.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/InlineAsm.h"
55#include "llvm/IR/InstVisitor.h"
56#include "llvm/IR/InstrTypes.h"
57#include "llvm/IR/Instruction.h"
60#include "llvm/IR/Intrinsics.h"
61#include "llvm/IR/LLVMContext.h"
62#include "llvm/IR/MDBuilder.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/Type.h"
66#include "llvm/IR/Use.h"
67#include "llvm/IR/Value.h"
71#include "llvm/Support/Debug.h"
84#include <algorithm>
85#include <cassert>
86#include <cstddef>
87#include <cstdint>
88#include <iomanip>
89#include <limits>
90#include <sstream>
91#include <string>
92#include <tuple>
93
94using namespace llvm;
95
96#define DEBUG_TYPE "asan"
97
99static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
100static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
102 std::numeric_limits<uint64_t>::max();
103static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
105static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
106static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
107static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
108static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
109static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
110static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
111static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
112static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
114static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
115static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
116static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
117static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
118static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
119static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
120static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
121static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
122static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
124
125// The shadow memory space is dynamically allocated.
127
128static const size_t kMinStackMallocSize = 1 << 6; // 64B
129static const size_t kMaxStackMallocSize = 1 << 16; // 64K
130static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
131static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
132
133const char kAsanModuleCtorName[] = "asan.module_ctor";
134const char kAsanModuleDtorName[] = "asan.module_dtor";
136// On Emscripten, the system needs more than one priorities for constructors.
138const char kAsanReportErrorTemplate[] = "__asan_report_";
139const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
140const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
141const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
143 "__asan_unregister_image_globals";
144const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
145const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
146const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
147const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
148const char kAsanInitName[] = "__asan_init";
149const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
150const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
151const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
152const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
153static const int kMaxAsanStackMallocSizeClass = 10;
154const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
156 "__asan_stack_malloc_always_";
157const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
158const char kAsanGenPrefix[] = "___asan_gen_";
159const char kODRGenPrefix[] = "__odr_asan_gen_";
160const char kSanCovGenPrefix[] = "__sancov_gen_";
161const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
162const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
163const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";
164
165// ASan version script has __asan_* wildcard. Triple underscore prevents a
166// linker (gold) warning about attempting to export a local symbol.
167const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";
168
170 "__asan_option_detect_stack_use_after_return";
171
173 "__asan_shadow_memory_dynamic_address";
174
175const char kAsanAllocaPoison[] = "__asan_alloca_poison";
176const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
177
178const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
179const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
180const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
181const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
182
183// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
184static const size_t kNumberOfAccessSizes = 5;
185
186static const uint64_t kAllocaRzSize = 32;
187
188// ASanAccessInfo implementation constants.
189constexpr size_t kCompileKernelShift = 0;
190constexpr size_t kCompileKernelMask = 0x1;
191constexpr size_t kAccessSizeIndexShift = 1;
192constexpr size_t kAccessSizeIndexMask = 0xf;
193constexpr size_t kIsWriteShift = 5;
194constexpr size_t kIsWriteMask = 0x1;
195
196// Command-line flags.
197
199 "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
200 cl::Hidden, cl::init(false));
201
203 "asan-recover",
204 cl::desc("Enable recovery mode (continue-after-error)."),
205 cl::Hidden, cl::init(false));
206
208 "asan-guard-against-version-mismatch",
209 cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
210 cl::init(true));
211
212// This flag may need to be replaced with -f[no-]asan-reads.
213static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
214 cl::desc("instrument read instructions"),
215 cl::Hidden, cl::init(true));
216
218 "asan-instrument-writes", cl::desc("instrument write instructions"),
219 cl::Hidden, cl::init(true));
220
221static cl::opt<bool>
222 ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
223 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
225
227 "asan-instrument-atomics",
228 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
229 cl::init(true));
230
231static cl::opt<bool>
232 ClInstrumentByval("asan-instrument-byval",
233 cl::desc("instrument byval call arguments"), cl::Hidden,
234 cl::init(true));
235
237 "asan-always-slow-path",
238 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
239 cl::init(false));
240
242 "asan-force-dynamic-shadow",
243 cl::desc("Load shadow address into a local variable for each function"),
244 cl::Hidden, cl::init(false));
245
246static cl::opt<bool>
247 ClWithIfunc("asan-with-ifunc",
248 cl::desc("Access dynamic shadow through an ifunc global on "
249 "platforms that support this"),
250 cl::Hidden, cl::init(true));
251
252static cl::opt<int>
253 ClShadowAddrSpace("asan-shadow-addr-space",
254 cl::desc("Address space for pointers to the shadow map"),
255 cl::Hidden, cl::init(0));
256
258 "asan-with-ifunc-suppress-remat",
259 cl::desc("Suppress rematerialization of dynamic shadow address by passing "
260 "it through inline asm in prologue."),
261 cl::Hidden, cl::init(true));
262
263// This flag limits the number of instructions to be instrumented
264// in any given BB. Normally, this should be set to unlimited (INT_MAX),
265// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
266// set it to 10000.
268 "asan-max-ins-per-bb", cl::init(10000),
269 cl::desc("maximal number of instructions to instrument in any given BB"),
270 cl::Hidden);
271
272// This flag may need to be replaced with -f[no]asan-stack.
273static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
274 cl::Hidden, cl::init(true));
276 "asan-max-inline-poisoning-size",
277 cl::desc(
278 "Inline shadow poisoning for blocks up to the given size in bytes."),
279 cl::Hidden, cl::init(64));
280
282 "asan-use-after-return",
283 cl::desc("Sets the mode of detection for stack-use-after-return."),
286 "Never detect stack use after return."),
289 "Detect stack use after return if "
290 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
292 "Always detect stack use after return.")),
294
295static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
296 cl::desc("Create redzones for byval "
297 "arguments (extra copy "
298 "required)"), cl::Hidden,
299 cl::init(true));
300
301static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
302 cl::desc("Check stack-use-after-scope"),
303 cl::Hidden, cl::init(false));
304
305// This flag may need to be replaced with -f[no]asan-globals.
306static cl::opt<bool> ClGlobals("asan-globals",
307 cl::desc("Handle global objects"), cl::Hidden,
308 cl::init(true));
309
310static cl::opt<bool> ClInitializers("asan-initialization-order",
311 cl::desc("Handle C++ initializer order"),
312 cl::Hidden, cl::init(true));
313
315 "asan-detect-invalid-pointer-pair",
316 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
317 cl::init(false));
318
320 "asan-detect-invalid-pointer-cmp",
321 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
322 cl::init(false));
323
325 "asan-detect-invalid-pointer-sub",
326 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
327 cl::init(false));
328
330 "asan-realign-stack",
331 cl::desc("Realign stack to the value of this flag (power of two)"),
332 cl::Hidden, cl::init(32));
333
335 "asan-instrumentation-with-call-threshold",
336 cl::desc("If the function being instrumented contains more than "
337 "this number of memory accesses, use callbacks instead of "
338 "inline checks (-1 means never use callbacks)."),
339 cl::Hidden, cl::init(7000));
340
342 "asan-memory-access-callback-prefix",
343 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
344 cl::init("__asan_"));
345
347 "asan-kernel-mem-intrinsic-prefix",
348 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
349 cl::init(false));
350
351static cl::opt<bool>
352 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
353 cl::desc("instrument dynamic allocas"),
354 cl::Hidden, cl::init(true));
355
357 "asan-skip-promotable-allocas",
358 cl::desc("Do not instrument promotable allocas"), cl::Hidden,
359 cl::init(true));
360
362 "asan-constructor-kind",
363 cl::desc("Sets the ASan constructor kind"),
364 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
366 "Use global constructors")),
368// These flags allow to change the shadow mapping.
369// The shadow mapping looks like
370// Shadow = (Mem >> scale) + offset
371
372static cl::opt<int> ClMappingScale("asan-mapping-scale",
373 cl::desc("scale of asan shadow mapping"),
374 cl::Hidden, cl::init(0));
375
377 ClMappingOffset("asan-mapping-offset",
378 cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
379 cl::Hidden, cl::init(0));
380
381// Optimization flags. Not user visible, used mostly for testing
382// and benchmarking the tool.
383
384static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
385 cl::Hidden, cl::init(true));
386
387static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
388 cl::desc("Optimize callbacks"),
389 cl::Hidden, cl::init(false));
390
392 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
393 cl::Hidden, cl::init(true));
394
395static cl::opt<bool> ClOptGlobals("asan-opt-globals",
396 cl::desc("Don't instrument scalar globals"),
397 cl::Hidden, cl::init(true));
398
400 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
401 cl::Hidden, cl::init(false));
402
404 "asan-stack-dynamic-alloca",
405 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
406 cl::init(true));
407
409 "asan-force-experiment",
410 cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
411 cl::init(0));
412
413static cl::opt<bool>
414 ClUsePrivateAlias("asan-use-private-alias",
415 cl::desc("Use private aliases for global variables"),
416 cl::Hidden, cl::init(true));
417
418static cl::opt<bool>
419 ClUseOdrIndicator("asan-use-odr-indicator",
420 cl::desc("Use odr indicators to improve ODR reporting"),
421 cl::Hidden, cl::init(true));
422
423static cl::opt<bool>
424 ClUseGlobalsGC("asan-globals-live-support",
425 cl::desc("Use linker features to support dead "
426 "code stripping of globals"),
427 cl::Hidden, cl::init(true));
428
429// This is on by default even though there is a bug in gold:
430// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
431static cl::opt<bool>
432 ClWithComdat("asan-with-comdat",
433 cl::desc("Place ASan constructors in comdat sections"),
434 cl::Hidden, cl::init(true));
435
437 "asan-destructor-kind",
438 cl::desc("Sets the ASan destructor kind. The default is to use the value "
439 "provided to the pass constructor"),
440 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
442 "Use global destructors")),
444
447 "asan-instrument-address-spaces",
448 cl::desc("Only instrument variables in the specified address spaces."),
450 cl::callback([](const unsigned &AddrSpace) {
451 SrcAddrSpaces.insert(AddrSpace);
452 }));
453
454// Debug flags.
455
456static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
457 cl::init(0));
458
459static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
460 cl::Hidden, cl::init(0));
461
463 cl::desc("Debug func"));
464
465static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
466 cl::Hidden, cl::init(-1));
467
468static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
469 cl::Hidden, cl::init(-1));
470
471STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
472STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
473STATISTIC(NumOptimizedAccessesToGlobalVar,
474 "Number of optimized accesses to global vars");
475STATISTIC(NumOptimizedAccessesToStackVar,
476 "Number of optimized accesses to stack vars");
477
478namespace {
479
/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  // log2 of the shadow granularity; each shadow byte covers (1 << Scale)
  // application bytes.
  int Scale;
  // Constant added (or OR'ed, see OrShadowOffset) to the scaled address.
  // Restored here: this member was dropped from the excerpt but is assigned
  // uint64_t offset constants throughout getShadowMapping().
  uint64_t Offset;
  // True when the offset can be OR'ed in instead of added (power-of-two
  // offsets on targets where OR is at least as cheap).
  bool OrShadowOffset;
  // True when the shadow base is taken from the __asan_shadow ifunc global
  // rather than a constant offset.
  bool InGlobal;
};
491
492} // end anonymous namespace
493
/// Compute the shadow mapping (shadow = (Mem >> Scale) ADD-or-OR Offset) for
/// the given target. \p LongSize is the pointer width in bits; \p IsKasan
/// selects kernel-sanitizer (KASAN) offsets where the target has distinct
/// user/kernel layouts.
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  // Classify the target once up front; the offset selection below is a
  // priority-ordered if/else chain over these predicates.
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.isABIN32();
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsAMDGPU = TargetTriple.isAMDGPU();
  bool IsHaiku = TargetTriple.isOSHaiku();
  bool IsWasm = TargetTriple.isWasm();
  bool IsBPF = TargetTriple.isBPF();

  ShadowMapping Mapping;

  // kDefaultShadowScale's declaration is not visible in this excerpt;
  // -asan-mapping-scale overrides whatever the default is.
  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    // kDynamicShadowSentinel means "resolve the shadow base at runtime".
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsWasm)
      Mapping.Offset = kWebAssemblyShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia) {
      // kDynamicShadowSentinel tells instrumentation to use the dynamic shadow.
      Mapping.Offset = kDynamicShadowSentinel;
    } else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        // Userspace x86-64: keep the offset below 2G and aligned so that
        // (offset >> Scale) stays addressable with short encodings.
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsHaiku && IsX86_64)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsBPF)
      Mapping.Offset = kDynamicShadowSentinel;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  // NOTE(review): the guard condition for the following assignment (one
  // source line, presumably the force-dynamic-shadow flag declared above as
  // "asan-force-dynamic-shadow") is missing from this excerpt; as written the
  // assignment is unconditional and the closing brace is unmatched. Restore
  // from the upstream file before compiling.
    Mapping.Offset = kDynamicShadowSentinel;
  }

  // An explicit -asan-mapping-offset overrides everything chosen above.
  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing shadow offset if more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 and loongarch64 we have to use add since
  // the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  // Android on ARM accesses the shadow through the __asan_shadow ifunc global
  // (unless -asan-with-ifunc is disabled).
  Mapping.InGlobal = ClWithIfunc && IsAndroid && IsArmOrThumb;

  return Mapping;
}
626
627void llvm::getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
628 bool IsKasan, uint64_t *ShadowBase,
629 int *MappingScale, bool *OrShadowOffset) {
630 auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
631 *ShadowBase = Mapping.Offset;
632 *MappingScale = Mapping.Scale;
633 *OrShadowOffset = Mapping.OrShadowOffset;
634}
635
  // NOTE(review): the enclosing function's signature line is not visible in
  // this excerpt; the body operates on a Function `F` and a bool
  // `ReadsArgMem` that the missing signature declares.
  //
  // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
  //
  // This is not only true for sanitized functions, because AttrInfer can
  // infer those attributes on libc functions, which is not true if those
  // are instrumented (Android) or intercepted.
  //
  // We might want to model ASan shadow memory more opaquely to get rid of
  // this problem altogether, by hiding the shadow memory write in an
  // intrinsic, essentially like in the AArch64StackTagging pass. But that's
  // for another day.

  // The API is weird. `onlyReadsMemory` actually means "does not write", and
  // `onlyWritesMemory` actually means "does not read". So we reconstruct
  // "accesses memory" && "does not read" <=> "writes".
  bool Changed = false;
  if (!F.doesNotAccessMemory()) {
    bool WritesMemory = !F.onlyReadsMemory();
    bool ReadsMemory = !F.onlyWritesMemory();
    // Drop the whole-function memory attribute when the function claims to
    // be write-only or argmem-only — either claim becomes false once shadow
    // reads are inserted.
    if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
      F.removeFnAttr(Attribute::Memory);
      Changed = true;
    }
  }
  if (ReadsArgMem) {
    for (Argument &A : F.args()) {
      // Instrumentation also reads argument memory, so `writeonly` on a
      // pointer argument would be a lie.
      if (A.hasAttribute(Attribute::WriteOnly)) {
        A.removeAttr(Attribute::WriteOnly);
        Changed = true;
      }
    }
  }
  if (Changed) {
    // nobuiltin makes sure later passes don't restore assumptions about
    // the function.
    F.addFnAttr(Attribute::NoBuiltin);
  }
}
674
680
688
// Size of the redzone implied by a shadow-mapping scale: one shadow granule
// (1 << MappingScale bytes), but never below the 32-byte minimum used for
// stack and global redzones. Scales 6 and 7 therefore yield 64 and 128 bytes.
static uint64_t getRedzoneSizeForScale(int MappingScale) {
  const uint64_t Granule = 1U << MappingScale;
  return Granule < 32 ? 32 : Granule;
}
694
696 if (TargetTriple.isOSEmscripten())
698 else
700}
701
702static Twine genName(StringRef suffix) {
703 return Twine(kAsanGenPrefix) + suffix;
704}
705
706namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  // Function whose calls are being tracked; set once at construction.
  Function *OwnerFn = nullptr;
  // Only true for scoped-EH personalities (see constructor); otherwise the
  // destructor has nothing to do.
  bool TrackInsertedCalls = false;
  // Calls made via createRuntimeCall while tracking is enabled.
  SmallVector<CallInst *> InsertedCalls;

public:
  // Enable tracking only when the function uses a scoped EH personality,
  // i.e. when funclet operand bundles may be required.
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  // On scope exit, re-emit every tracked call that landed inside an EH
  // funclet with the appropriate "funclet" operand bundle.
  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    // Color each basic block by the funclet(s) it belongs to.
    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();

      if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle
        OperandBundleDef OB("funclet", &*EHPadIt);
        // NOTE(review): the statement creating `NewCall` (the clone of CI
        // carrying the funclet bundle) is missing from this excerpt; only
        // its trailing argument line survives below. Restore from the
        // upstream file before compiling.
            OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  // Emit a call through IRB and remember it for destructor post-processing
  // when tracking is enabled. IRB must be positioned inside OwnerFn.
  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};
774
775/// AddressSanitizer: instrument the code in module to find memory bugs.
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  // Each configuration knob can be overridden by its corresponding cl::opt;
  // a flag given on the command line (getNumOccurrences() > 0) wins over the
  // value passed by the pass constructor.
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                // NOTE(review): the true-branch line of this conditional is
                // missing from this excerpt; restore before compiling.
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                // NOTE(review): the true-branch line of this conditional is
                // missing from this excerpt; restore before compiling.
                : MaxInlinePoisoningSize) {
    // Cache module-level facts used throughout instrumentation.
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = M.getTargetTriple();

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  // Static size of the alloca in bytes, per the module's data layout.
  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  // NOTE(review): the leading line of the declaration these parameters belong
  // to (its return type and name) is missing from this excerpt.
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
      const TargetTransformInfo *TTI);

  // Instrumentation entry points for individual memory operations.
  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
                          const TargetTransformInfo *TTI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);
  void markCatchParametersAsUninteresting(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  Module &M;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  // NOTE(review): one member declaration line is missing here in this
  // excerpt — judging by the constructor's initializer list, the stored
  // use-after-return detection mode.
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  // Per-function dynamic shadow base; reset by FunctionStateRAII.
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  // Cache for isInterestingAlloca(); cleared by FunctionStateRAII.
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};
929
930class ModuleAddressSanitizer {
931public:
932 ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
933 bool CompileKernel = false, bool Recover = false,
934 bool UseGlobalsGC = true, bool UseOdrIndicator = true,
935 AsanDtorKind DestructorKind = AsanDtorKind::Global,
936 AsanCtorKind ConstructorKind = AsanCtorKind::Global)
937 : M(M),
938 CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
939 : CompileKernel),
940 InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
942 : InsertVersionCheck),
943 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
944 UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
945 // Enable aliases as they should have no downside with ODR indicators.
946 UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
948 : UseOdrIndicator),
949 UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
951 : UseOdrIndicator),
952 // Not a typo: ClWithComdat is almost completely pointless without
953 // ClUseGlobalsGC (because then it only works on modules without
954 // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
955 // and both suffer from gold PR19002 for which UseGlobalsGC constructor
956 // argument is designed as workaround. Therefore, disable both
957 // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
958 // do globals-gc.
959 UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
960 DestructorKind(DestructorKind),
961 ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
963 : ConstructorKind) {
964 C = &(M.getContext());
965 int LongSize = M.getDataLayout().getPointerSizeInBits();
966 IntptrTy = Type::getIntNTy(*C, LongSize);
967 PtrTy = PointerType::getUnqual(*C);
968 TargetTriple = M.getTargetTriple();
969 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
970
971 if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
972 this->DestructorKind = ClOverrideDestructorKind;
973 assert(this->DestructorKind != AsanDtorKind::Invalid);
974 }
975
976 bool instrumentModule();
977
978private:
979 void initializeCallbacks();
980
981 void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
982 void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
983 ArrayRef<GlobalVariable *> ExtendedGlobals,
984 ArrayRef<Constant *> MetadataInitializers);
985 void instrumentGlobalsELF(IRBuilder<> &IRB,
986 ArrayRef<GlobalVariable *> ExtendedGlobals,
987 ArrayRef<Constant *> MetadataInitializers,
988 const std::string &UniqueModuleId);
989 void InstrumentGlobalsMachO(IRBuilder<> &IRB,
990 ArrayRef<GlobalVariable *> ExtendedGlobals,
991 ArrayRef<Constant *> MetadataInitializers);
992 void
993 InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
994 ArrayRef<GlobalVariable *> ExtendedGlobals,
995 ArrayRef<Constant *> MetadataInitializers);
996
997 GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
998 StringRef OriginalName);
999 void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
1000 StringRef InternalSuffix);
1001 Instruction *CreateAsanModuleDtor();
1002
1003 const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
1004 bool shouldInstrumentGlobal(GlobalVariable *G) const;
1005 bool ShouldUseMachOGlobalsSection() const;
1006 StringRef getGlobalMetadataSection() const;
1007 void poisonOneInitializer(Function &GlobalInit);
1008 void createInitializerPoisonCalls();
1009 uint64_t getMinRedzoneSizeForGlobal() const {
1010 return getRedzoneSizeForScale(Mapping.Scale);
1011 }
1012 uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
1013 int GetAsanVersion() const;
1014 GlobalVariable *getOrCreateModuleName();
1015
1016 Module &M;
1017 bool CompileKernel;
1018 bool InsertVersionCheck;
1019 bool Recover;
1020 bool UseGlobalsGC;
1021 bool UsePrivateAlias;
1022 bool UseOdrIndicator;
1023 bool UseCtorComdat;
1024 AsanDtorKind DestructorKind;
1025 AsanCtorKind ConstructorKind;
1026 Type *IntptrTy;
1027 PointerType *PtrTy;
1028 LLVMContext *C;
1029 Triple TargetTriple;
1030 ShadowMapping Mapping;
1031 FunctionCallee AsanPoisonGlobals;
1032 FunctionCallee AsanUnpoisonGlobals;
1033 FunctionCallee AsanRegisterGlobals;
1034 FunctionCallee AsanUnregisterGlobals;
1035 FunctionCallee AsanRegisterImageGlobals;
1036 FunctionCallee AsanUnregisterImageGlobals;
1037 FunctionCallee AsanRegisterElfGlobals;
1038 FunctionCallee AsanUnregisterElfGlobals;
1039
1040 Function *AsanCtorFunction = nullptr;
1041 Function *AsanDtorFunction = nullptr;
1042 GlobalVariable *ModuleName = nullptr;
1043};
1044
1045// Stack poisoning does not play well with exception handling.
1046// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
1048// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
1049// stack in the interceptor. This however does not work inside the
1050// actual function which catches the exception. Most likely because the
1051// compiler hoists the load of the shadow value somewhere too high.
1052// This causes asan to report a non-existing bug on 453.povray.
1053// It sounds like an LLVM bug.
1054struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
1055 Function &F;
1056 AddressSanitizer &ASan;
1057 RuntimeCallInserter &RTCI;
1058 DIBuilder DIB;
1059 LLVMContext *C;
1060 Type *IntptrTy;
1061 Type *IntptrPtrTy;
1062 ShadowMapping Mapping;
1063
1065 SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
1066 SmallVector<Instruction *, 8> RetVec;
1067
1068 FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
1069 AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
1070 FunctionCallee AsanSetShadowFunc[0x100] = {};
1071 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1072 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1073
1074 // Stores a place and arguments of poisoning/unpoisoning call for alloca.
1075 struct AllocaPoisonCall {
1076 IntrinsicInst *InsBefore;
1077 AllocaInst *AI;
1078 uint64_t Size;
1079 bool DoPoison;
1080 };
1081 SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
1082 SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
1083
1084 SmallVector<AllocaInst *, 1> DynamicAllocaVec;
1085 SmallVector<IntrinsicInst *, 1> StackRestoreVec;
1086 AllocaInst *DynamicAllocaLayout = nullptr;
1087 IntrinsicInst *LocalEscapeCall = nullptr;
1088
1089 bool HasInlineAsm = false;
1090 bool HasReturnsTwiceCall = false;
1091 bool PoisonStack;
1092
1093 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1094 RuntimeCallInserter &RTCI)
1095 : F(F), ASan(ASan), RTCI(RTCI),
1096 DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
1097 IntptrTy(ASan.IntptrTy),
1098 IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
1099 Mapping(ASan.Mapping),
1100 PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}
1101
1102 bool runOnFunction() {
1103 if (!PoisonStack)
1104 return false;
1105
1107 copyArgsPassedByValToAllocas();
1108
1109 // Collect alloca, ret, lifetime instructions etc.
1110 for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
1111
1112 if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
1113
1114 initializeCallbacks(*F.getParent());
1115
1116 processDynamicAllocas();
1117 processStaticAllocas();
1118
1119 if (ClDebugStack) {
1120 LLVM_DEBUG(dbgs() << F);
1121 }
1122 return true;
1123 }
1124
1125 // Arguments marked with the "byval" attribute are implicitly copied without
1126 // using an alloca instruction. To produce redzones for those arguments, we
1127 // copy them a second time into memory allocated with an alloca instruction.
1128 void copyArgsPassedByValToAllocas();
1129
1130 // Finds all Alloca instructions and puts
1131 // poisoned red zones around all of them.
1132 // Then unpoison everything back before the function returns.
1133 void processStaticAllocas();
1134 void processDynamicAllocas();
1135
1136 void createDynamicAllocasInitStorage();
1137
1138 // ----------------------- Visitors.
1139 /// Collect all Ret instructions, or the musttail call instruction if it
1140 /// precedes the return instruction.
1141 void visitReturnInst(ReturnInst &RI) {
1142 if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
1143 RetVec.push_back(CI);
1144 else
1145 RetVec.push_back(&RI);
1146 }
1147
1148 /// Collect all Resume instructions.
1149 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1150
1151 /// Collect all CatchReturnInst instructions.
1152 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1153
1154 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1155 Value *SavedStack) {
1156 IRBuilder<> IRB(InstBefore);
1157 Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
1158 // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
1159 // need to adjust extracted SP to compute the address of the most recent
1160 // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
1161 // this purpose.
1162 if (!isa<ReturnInst>(InstBefore)) {
1163 Value *DynamicAreaOffset = IRB.CreateIntrinsic(
1164 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1165
1166 DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
1167 DynamicAreaOffset);
1168 }
1169
1170 RTCI.createRuntimeCall(
1171 IRB, AsanAllocasUnpoisonFunc,
1172 {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1173 }
1174
1175 // Unpoison dynamic allocas redzones.
1176 void unpoisonDynamicAllocas() {
1177 for (Instruction *Ret : RetVec)
1178 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1179
1180 for (Instruction *StackRestoreInst : StackRestoreVec)
1181 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1182 StackRestoreInst->getOperand(0));
1183 }
1184
1185 // Deploy and poison redzones around dynamic alloca call. To do this, we
1186 // should replace this call with another one with changed parameters and
1187 // replace all its uses with new address, so
1188 // addr = alloca type, old_size, align
1189 // is replaced by
1190 // new_size = (old_size + additional_size) * sizeof(type)
1191 // tmp = alloca i8, new_size, max(align, 32)
1192 // addr = tmp + 32 (first 32 bytes are for the left redzone).
1193 // Additional_size is added to make new memory allocation contain not only
1194 // requested memory, but also left, partial and right redzones.
1195 void handleDynamicAllocaCall(AllocaInst *AI);
1196
1197 /// Collect Alloca instructions we want (and can) handle.
1198 void visitAllocaInst(AllocaInst &AI) {
1199 // FIXME: Handle scalable vectors instead of ignoring them.
1200 const Type *AllocaType = AI.getAllocatedType();
1201 const auto *STy = dyn_cast<StructType>(AllocaType);
1202 if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1203 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1204 if (AI.isStaticAlloca()) {
1205 // Skip over allocas that are present *before* the first instrumented
1206 // alloca, we don't want to move those around.
1207 if (AllocaVec.empty())
1208 return;
1209
1210 StaticAllocasToMoveUp.push_back(&AI);
1211 }
1212 return;
1213 }
1214
1215 if (!AI.isStaticAlloca())
1216 DynamicAllocaVec.push_back(&AI);
1217 else
1218 AllocaVec.push_back(&AI);
1219 }
1220
1221 /// Collect lifetime intrinsic calls to check for use-after-scope
1222 /// errors.
1223 void visitIntrinsicInst(IntrinsicInst &II) {
1224 Intrinsic::ID ID = II.getIntrinsicID();
1225 if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
1226 if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
1227 if (!ASan.UseAfterScope)
1228 return;
1229 if (!II.isLifetimeStartOrEnd())
1230 return;
1231 // Find alloca instruction that corresponds to llvm.lifetime argument.
1232 AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
1233 // We're interested only in allocas we can handle.
1234 if (!AI || !ASan.isInterestingAlloca(*AI))
1235 return;
1236
1237 std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
1238 // Check that size is known and can be stored in IntptrTy.
1239 // TODO: Add support for scalable vectors if possible.
1240 if (!Size || Size->isScalable() ||
1242 return;
1243
1244 bool DoPoison = (ID == Intrinsic::lifetime_end);
1245 AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
1246 if (AI->isStaticAlloca())
1247 StaticAllocaPoisonCallVec.push_back(APC);
1249 DynamicAllocaPoisonCallVec.push_back(APC);
1250 }
1251
1252 void visitCallBase(CallBase &CB) {
1253 if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
1254 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1255 HasReturnsTwiceCall |= CI->canReturnTwice();
1256 }
1257 }
1258
1259 // ---------------------- Helpers.
1260 void initializeCallbacks(Module &M);
1261
1262 // Copies bytes from ShadowBytes into shadow memory for indexes where
1263 // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
1264 // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
1265 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1266 IRBuilder<> &IRB, Value *ShadowBase);
1267 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1268 size_t Begin, size_t End, IRBuilder<> &IRB,
1269 Value *ShadowBase);
1270 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1271 ArrayRef<uint8_t> ShadowBytes, size_t Begin,
1272 size_t End, IRBuilder<> &IRB, Value *ShadowBase);
1273
1274 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
1275
1276 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
1277 bool Dynamic);
1278 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
1279 Instruction *ThenTerm, Value *ValueIfFalse);
1280};
1281
1282} // end anonymous namespace
1283
1285 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1287 OS, MapClassName2PassName);
1288 OS << '<';
1289 if (Options.CompileKernel)
1290 OS << "kernel;";
1291 if (Options.UseAfterScope)
1292 OS << "use-after-scope";
1293 OS << '>';
1294}
1295
1297 const AddressSanitizerOptions &Options, bool UseGlobalGC,
1298 bool UseOdrIndicator, AsanDtorKind DestructorKind,
1299 AsanCtorKind ConstructorKind)
1300 : Options(Options), UseGlobalGC(UseGlobalGC),
1301 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1302 ConstructorKind(ConstructorKind) {}
1303
1306 // Return early if nosanitize_address module flag is present for the module.
1307 // This implies that asan pass has already run before.
1308 if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
1309 return PreservedAnalyses::all();
1310
1311 ModuleAddressSanitizer ModuleSanitizer(
1312 M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
1313 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1314 bool Modified = false;
1315 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1316 const StackSafetyGlobalInfo *const SSGI =
1317 ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
1318 for (Function &F : M) {
1319 if (F.empty())
1320 continue;
1321 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
1322 continue;
1323 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
1324 continue;
1325 if (F.getName().starts_with("__asan_"))
1326 continue;
1327 if (F.isPresplitCoroutine())
1328 continue;
1329 AddressSanitizer FunctionSanitizer(
1330 M, SSGI, Options.InstrumentationWithCallsThreshold,
1331 Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
1332 Options.UseAfterScope, Options.UseAfterReturn);
1333 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1334 const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
1335 Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
1336 }
1337 Modified |= ModuleSanitizer.instrumentModule();
1338 if (!Modified)
1339 return PreservedAnalyses::all();
1340
1342 // GlobalsAA is considered stateless and does not get invalidated unless
1343 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
1344 // make changes that require GlobalsAA to be invalidated.
1345 PA.abandon<GlobalsAA>();
1346 return PA;
1347}
1348
1350 size_t Res = llvm::countr_zero(TypeSize / 8);
1352 return Res;
1353}
1354
1355/// Check if \p G has been created by a trusted compiler pass.
1357 // Do not instrument @llvm.global_ctors, @llvm.used, etc.
1358 if (G->getName().starts_with("llvm.") ||
1359 // Do not instrument gcov counter arrays.
1360 G->getName().starts_with("__llvm_gcov_ctr") ||
1361 // Do not instrument rtti proxy symbols for function sanitizer.
1362 G->getName().starts_with("__llvm_rtti_proxy"))
1363 return true;
1364
1365 // Do not instrument asan globals.
1366 if (G->getName().starts_with(kAsanGenPrefix) ||
1367 G->getName().starts_with(kSanCovGenPrefix) ||
1368 G->getName().starts_with(kODRGenPrefix))
1369 return true;
1370
1371 return false;
1372}
1373
1375 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1376 unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
1377 // Globals in address space 1 and 4 are supported for AMDGPU.
1378 if (AddrSpace == 3 || AddrSpace == 5)
1379 return true;
1380 return false;
1381}
1382
1383static bool isSupportedAddrspace(const Triple &TargetTriple, Value *Addr) {
1384 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1385 unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
1386
1387 if (!SrcAddrSpaces.empty())
1388 return SrcAddrSpaces.count(AddrSpace);
1389
1390 if (TargetTriple.isAMDGPU())
1391 return !isUnsupportedAMDGPUAddrspace(Addr);
1392
1393 return AddrSpace == 0;
1394}
1395
1396Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
1397 // Shadow >> scale
1398 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
1399 if (Mapping.Offset == 0) return Shadow;
1400 // (Shadow >> scale) | offset
1401 Value *ShadowBase;
1402 if (LocalDynamicShadow)
1403 ShadowBase = LocalDynamicShadow;
1404 else
1405 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1406 if (Mapping.OrShadowOffset)
1407 return IRB.CreateOr(Shadow, ShadowBase);
1408 else
1409 return IRB.CreateAdd(Shadow, ShadowBase);
1410}
1411
1412// Instrument memset/memmove/memcpy
1413void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
1414 RuntimeCallInserter &RTCI) {
1416 if (isa<MemTransferInst>(MI)) {
1417 RTCI.createRuntimeCall(
1418 IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
1419 {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
1420 IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
1421 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1422 } else if (isa<MemSetInst>(MI)) {
1423 RTCI.createRuntimeCall(
1424 IRB, AsanMemset,
1425 {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
1426 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1427 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1428 }
1429 MI->eraseFromParent();
1430}
1431
1432/// Check if we want (and can) handle this alloca.
1433bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1434 auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);
1435
1436 if (!Inserted)
1437 return It->getSecond();
1438
1439 bool IsInteresting =
1440 (AI.getAllocatedType()->isSized() &&
1441 // alloca() may be called with 0 size, ignore it.
1442 ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
1443 // We are only interested in allocas not promotable to registers.
1444 // Promotable allocas are common under -O0.
1446 // inalloca allocas are not treated as static, and we don't want
1447 // dynamic alloca instrumentation for them as well.
1448 !AI.isUsedWithInAlloca() &&
1449 // swifterror allocas are register promoted by ISel
1450 !AI.isSwiftError() &&
1451 // safe allocas are not interesting
1452 !(SSGI && SSGI->isSafe(AI)));
1453
1454 It->second = IsInteresting;
1455 return IsInteresting;
1456}
1457
1458bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
1459 // Check whether the target supports sanitizing the address space
1460 // of the pointer.
1461 if (!isSupportedAddrspace(TargetTriple, Ptr))
1462 return true;
1463
1464 // Ignore swifterror addresses.
1465 // swifterror memory addresses are mem2reg promoted by instruction
1466 // selection. As such they cannot have regular uses like an instrumentation
1467 // function and it makes no sense to track them as memory.
1468 if (Ptr->isSwiftError())
1469 return true;
1470
1471 // Treat memory accesses to promotable allocas as non-interesting since they
1472 // will not cause memory violations. This greatly speeds up the instrumented
1473 // executable at -O0.
1474 if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
1475 if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
1476 return true;
1477
1478 if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
1479 findAllocaForValue(Ptr))
1480 return true;
1481
1482 return false;
1483}
1484
1485void AddressSanitizer::getInterestingMemoryOperands(
1487 const TargetTransformInfo *TTI) {
1488 // Do not instrument the load fetching the dynamic shadow address.
1489 if (LocalDynamicShadow == I)
1490 return;
1491
1492 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1493 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
1494 return;
1495 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
1496 LI->getType(), LI->getAlign());
1497 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1498 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
1499 return;
1500 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
1501 SI->getValueOperand()->getType(), SI->getAlign());
1502 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
1503 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
1504 return;
1505 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
1506 RMW->getValOperand()->getType(), std::nullopt);
1507 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
1508 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
1509 return;
1510 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
1511 XCHG->getCompareOperand()->getType(),
1512 std::nullopt);
1513 } else if (auto CI = dyn_cast<CallInst>(I)) {
1514 switch (CI->getIntrinsicID()) {
1515 case Intrinsic::masked_load:
1516 case Intrinsic::masked_store:
1517 case Intrinsic::masked_gather:
1518 case Intrinsic::masked_scatter: {
1519 bool IsWrite = CI->getType()->isVoidTy();
1520 // Masked store has an initial operand for the value.
1521 unsigned OpOffset = IsWrite ? 1 : 0;
1522 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1523 return;
1524
1525 auto BasePtr = CI->getOperand(OpOffset);
1526 if (ignoreAccess(I, BasePtr))
1527 return;
1528 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1529 MaybeAlign Alignment = CI->getParamAlign(0);
1530 Value *Mask = CI->getOperand(1 + OpOffset);
1531 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
1532 break;
1533 }
1534 case Intrinsic::masked_expandload:
1535 case Intrinsic::masked_compressstore: {
1536 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1537 unsigned OpOffset = IsWrite ? 1 : 0;
1538 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1539 return;
1540 auto BasePtr = CI->getOperand(OpOffset);
1541 if (ignoreAccess(I, BasePtr))
1542 return;
1543 MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
1544 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1545
1546 IRBuilder IB(I);
1547 Value *Mask = CI->getOperand(1 + OpOffset);
1548 // Use the popcount of Mask as the effective vector length.
1549 Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
1550 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
1551 Value *EVL = IB.CreateAddReduce(ExtMask);
1552 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
1553 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1554 EVL);
1555 break;
1556 }
1557 case Intrinsic::vp_load:
1558 case Intrinsic::vp_store:
1559 case Intrinsic::experimental_vp_strided_load:
1560 case Intrinsic::experimental_vp_strided_store: {
1561 auto *VPI = cast<VPIntrinsic>(CI);
1562 unsigned IID = CI->getIntrinsicID();
1563 bool IsWrite = CI->getType()->isVoidTy();
1564 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1565 return;
1566 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1567 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1568 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
1569 Value *Stride = nullptr;
1570 if (IID == Intrinsic::experimental_vp_strided_store ||
1571 IID == Intrinsic::experimental_vp_strided_load) {
1572 Stride = VPI->getOperand(PtrOpNo + 1);
1573 // Use the pointer alignment as the element alignment if the stride is a
1574 // multiple of the pointer alignment. Otherwise, the element alignment
1575 // should be Align(1).
1576 unsigned PointerAlign = Alignment.valueOrOne().value();
1577 if (!isa<ConstantInt>(Stride) ||
1578 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1579 Alignment = Align(1);
1580 }
1581 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1582 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1583 Stride);
1584 break;
1585 }
1586 case Intrinsic::vp_gather:
1587 case Intrinsic::vp_scatter: {
1588 auto *VPI = cast<VPIntrinsic>(CI);
1589 unsigned IID = CI->getIntrinsicID();
1590 bool IsWrite = IID == Intrinsic::vp_scatter;
1591 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1592 return;
1593 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1594 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1595 MaybeAlign Alignment = VPI->getPointerAlignment();
1596 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1597 VPI->getMaskParam(),
1598 VPI->getVectorLengthParam());
1599 break;
1600 }
1601 default:
1602 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1603 MemIntrinsicInfo IntrInfo;
1604 if (TTI->getTgtMemIntrinsic(II, IntrInfo))
1605 Interesting = IntrInfo.InterestingOperands;
1606 return;
1607 }
1608 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1609 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
1610 ignoreAccess(I, CI->getArgOperand(ArgNo)))
1611 continue;
1612 Type *Ty = CI->getParamByValType(ArgNo);
1613 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
1614 }
1615 }
1616 }
1617}
1618
1619static bool isPointerOperand(Value *V) {
1620 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1621}
1622
1623// This is a rough heuristic; it may cause both false positives and
1624// false negatives. The proper implementation requires cooperation with
1625// the frontend.
1627 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1628 if (!Cmp->isRelational())
1629 return false;
1630 } else {
1631 return false;
1632 }
1633 return isPointerOperand(I->getOperand(0)) &&
1634 isPointerOperand(I->getOperand(1));
1635}
1636
1637// This is a rough heuristic; it may cause both false positives and
1638// false negatives. The proper implementation requires cooperation with
1639// the frontend.
1642 if (BO->getOpcode() != Instruction::Sub)
1643 return false;
1644 } else {
1645 return false;
1646 }
1647 return isPointerOperand(I->getOperand(0)) &&
1648 isPointerOperand(I->getOperand(1));
1649}
1650
1651bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1652 // If a global variable does not have dynamic initialization we don't
1653 // have to instrument it. However, if a global does not have initializer
1654 // at all, we assume it has dynamic initializer (in other TU).
1655 if (!G->hasInitializer())
1656 return false;
1657
1658 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1659 return false;
1660
1661 return true;
1662}
1663
1664void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1665 Instruction *I, RuntimeCallInserter &RTCI) {
1666 IRBuilder<> IRB(I);
1667 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1668 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1669 for (Value *&i : Param) {
1670 if (i->getType()->isPointerTy())
1671 i = IRB.CreatePointerCast(i, IntptrTy);
1672 }
1673 RTCI.createRuntimeCall(IRB, F, Param);
1674}
1675
1676static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1677 Instruction *InsertBefore, Value *Addr,
1678 MaybeAlign Alignment, unsigned Granularity,
1679 TypeSize TypeStoreSize, bool IsWrite,
1680 Value *SizeArgument, bool UseCalls,
1681 uint32_t Exp, RuntimeCallInserter &RTCI) {
1682 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1683 // if the data is properly aligned.
1684 if (!TypeStoreSize.isScalable()) {
1685 const auto FixedSize = TypeStoreSize.getFixedValue();
1686 switch (FixedSize) {
1687 case 8:
1688 case 16:
1689 case 32:
1690 case 64:
1691 case 128:
1692 if (!Alignment || *Alignment >= Granularity ||
1693 *Alignment >= FixedSize / 8)
1694 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1695 FixedSize, IsWrite, nullptr, UseCalls,
1696 Exp, RTCI);
1697 }
1698 }
1699 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1700 IsWrite, nullptr, UseCalls, Exp, RTCI);
1701}
1702
1703void AddressSanitizer::instrumentMaskedLoadOrStore(
1704 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1705 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1706 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1707 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1708 RuntimeCallInserter &RTCI) {
1709 auto *VTy = cast<VectorType>(OpType);
1710 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1711 auto Zero = ConstantInt::get(IntptrTy, 0);
1712
1713 IRBuilder IB(I);
1714 Instruction *LoopInsertBefore = I;
1715 if (EVL) {
1716 // The end argument of SplitBlockAndInsertForLane is assumed bigger
1717 // than zero, so we should check whether EVL is zero here.
1718 Type *EVLType = EVL->getType();
1719 Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1720 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
1721 IB.SetInsertPoint(LoopInsertBefore);
1722 // Cast EVL to IntptrTy.
1723 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1724 // To avoid undefined behavior for extracting with out of range index, use
1725 // the minimum of evl and element count as trip count.
1726 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1727 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1728 } else {
1729 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1730 }
1731
1732 // Cast Stride to IntptrTy.
1733 if (Stride)
1734 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1735
1736 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
1737 [&](IRBuilderBase &IRB, Value *Index) {
1738 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1739 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1740 if (MaskElemC->isZero())
1741 // No check
1742 return;
1743 // Unconditional check
1744 } else {
1745 // Conditional check
1746 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1747 MaskElem, &*IRB.GetInsertPoint(), false);
1748 IRB.SetInsertPoint(ThenTerm);
1749 }
1750
1751 Value *InstrumentedAddress;
1752 if (isa<VectorType>(Addr->getType())) {
1753 assert(
1754 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1755 "Expected vector of pointer.");
1756 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1757 } else if (Stride) {
1758 Index = IRB.CreateMul(Index, Stride);
1759 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1760 } else {
1761 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1762 }
1763 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1764 Alignment, Granularity, ElemTypeSize, IsWrite,
1765 SizeArgument, UseCalls, Exp, RTCI);
1766 });
1767}
1768
1769void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1770 InterestingMemoryOperand &O, bool UseCalls,
1771 const DataLayout &DL,
1772 RuntimeCallInserter &RTCI) {
1773 Value *Addr = O.getPtr();
1774
1775 // Optimization experiments.
1776 // The experiments can be used to evaluate potential optimizations that remove
1777 // instrumentation (assess false negatives). Instead of completely removing
1778 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1779 // experiments that want to remove instrumentation of this instruction).
1780 // If Exp is non-zero, this pass will emit special calls into runtime
1781 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1782 // make runtime terminate the program in a special way (with a different
1783 // exit status). Then you run the new compiler on a buggy corpus, collect
1784 // the special terminations (ideally, you don't see them at all -- no false
1785 // negatives) and make the decision on the optimization.
1786 uint32_t Exp = ClForceExperiment;
1787
1788 if (ClOpt && ClOptGlobals) {
1789 // If initialization order checking is disabled, a simple access to a
1790 // dynamically initialized global is always valid.
1792 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1793 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1794 NumOptimizedAccessesToGlobalVar++;
1795 return;
1796 }
1797 }
1798
1799 if (ClOpt && ClOptStack) {
1800 // A direct inbounds access to a stack variable is always valid.
1802 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1803 NumOptimizedAccessesToStackVar++;
1804 return;
1805 }
1806 }
1807
1808 if (O.IsWrite)
1809 NumInstrumentedWrites++;
1810 else
1811 NumInstrumentedReads++;
1812
1813 if (O.MaybeByteOffset) {
1814 Type *Ty = Type::getInt8Ty(*C);
1815 IRBuilder IB(O.getInsn());
1816
1817 Value *OffsetOp = O.MaybeByteOffset;
1818 if (TargetTriple.isRISCV()) {
1819 Type *OffsetTy = OffsetOp->getType();
1820 // RVV indexed loads/stores zero-extend offset operands which are narrower
1821 // than XLEN to XLEN.
1822 if (OffsetTy->getScalarType()->getIntegerBitWidth() <
1823 static_cast<unsigned>(LongSize)) {
1824 VectorType *OrigType = cast<VectorType>(OffsetTy);
1825 Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
1826 OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
1827 }
1828 }
1829 Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
1830 }
1831
1832 unsigned Granularity = 1 << Mapping.Scale;
1833 if (O.MaybeMask) {
1834 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1835 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1836 Granularity, O.OpType, O.IsWrite, nullptr,
1837 UseCalls, Exp, RTCI);
1838 } else {
1839 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1840 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1841 UseCalls, Exp, RTCI);
1842 }
1843}
1844
1845Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1846 Value *Addr, bool IsWrite,
1847 size_t AccessSizeIndex,
1848 Value *SizeArgument,
1849 uint32_t Exp,
1850 RuntimeCallInserter &RTCI) {
1851 InstrumentationIRBuilder IRB(InsertBefore);
1852 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1853 CallInst *Call = nullptr;
1854 if (SizeArgument) {
1855 if (Exp == 0)
1856 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1857 {Addr, SizeArgument});
1858 else
1859 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1860 {Addr, SizeArgument, ExpVal});
1861 } else {
1862 if (Exp == 0)
1863 Call = RTCI.createRuntimeCall(
1864 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1865 else
1866 Call = RTCI.createRuntimeCall(
1867 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1868 }
1869
1871 return Call;
1872}
1873
1874Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1875 Value *ShadowValue,
1876 uint32_t TypeStoreSize) {
1877 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1878 // Addr & (Granularity - 1)
1879 Value *LastAccessedByte =
1880 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1881 // (Addr & (Granularity - 1)) + size - 1
1882 if (TypeStoreSize / 8 > 1)
1883 LastAccessedByte = IRB.CreateAdd(
1884 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1885 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1886 LastAccessedByte =
1887 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1888 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1889 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1890}
1891
1892Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1893 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1894 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1895 // Do not instrument unsupported addrspaces.
1897 return nullptr;
1898 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1899 // Follow host instrumentation for global and constant addresses.
1900 if (PtrTy->getPointerAddressSpace() != 0)
1901 return InsertBefore;
1902 // Instrument generic addresses in supported addressspaces.
1903 IRBuilder<> IRB(InsertBefore);
1904 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1905 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1906 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1907 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1908 Value *AddrSpaceZeroLanding =
1909 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1910 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1911 return InsertBefore;
1912}
1913
1914Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1915 Value *Cond, bool Recover) {
1916 Module &M = *IRB.GetInsertBlock()->getModule();
1917 Value *ReportCond = Cond;
1918 if (!Recover) {
1919 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1920 IRB.getInt1Ty());
1921 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1922 }
1923
1924 auto *Trm =
1925 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1927 Trm->getParent()->setName("asan.report");
1928
1929 if (Recover)
1930 return Trm;
1931
1932 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1933 IRB.SetInsertPoint(Trm);
1934 return IRB.CreateCall(
1935 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1936}
1937
1938void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1939 Instruction *InsertBefore, Value *Addr,
1940 MaybeAlign Alignment,
1941 uint32_t TypeStoreSize, bool IsWrite,
1942 Value *SizeArgument, bool UseCalls,
1943 uint32_t Exp,
1944 RuntimeCallInserter &RTCI) {
1945 if (TargetTriple.isAMDGPU()) {
1946 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1947 TypeStoreSize, IsWrite, SizeArgument);
1948 if (!InsertBefore)
1949 return;
1950 }
1951
1952 InstrumentationIRBuilder IRB(InsertBefore);
1953 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1954
1955 if (UseCalls && ClOptimizeCallbacks) {
1956 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1957 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1958 {IRB.CreatePointerCast(Addr, PtrTy),
1959 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1960 return;
1961 }
1962
1963 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1964 if (UseCalls) {
1965 if (Exp == 0)
1966 RTCI.createRuntimeCall(
1967 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1968 else
1969 RTCI.createRuntimeCall(
1970 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1971 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1972 return;
1973 }
1974
1975 Type *ShadowTy =
1976 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1977 Type *ShadowPtrTy = PointerType::get(*C, ClShadowAddrSpace);
1978 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1979 const uint64_t ShadowAlign =
1980 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1981 Value *ShadowValue = IRB.CreateAlignedLoad(
1982 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1983
1984 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1985 size_t Granularity = 1ULL << Mapping.Scale;
1986 Instruction *CrashTerm = nullptr;
1987
1988 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1989
1990 if (TargetTriple.isAMDGCN()) {
1991 if (GenSlowPath) {
1992 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1993 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1994 }
1995 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1996 } else if (GenSlowPath) {
1997 // We use branch weights for the slow path check, to indicate that the slow
1998 // path is rarely taken. This seems to be the case for SPEC benchmarks.
2000 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
2001 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
2002 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
2003 IRB.SetInsertPoint(CheckTerm);
2004 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
2005 if (Recover) {
2006 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
2007 } else {
2008 BasicBlock *CrashBlock =
2009 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
2010 CrashTerm = new UnreachableInst(*C, CrashBlock);
2011 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
2012 ReplaceInstWithInst(CheckTerm, NewTerm);
2013 }
2014 } else {
2015 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
2016 }
2017
2018 Instruction *Crash = generateCrashCode(
2019 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
2020 if (OrigIns->getDebugLoc())
2021 Crash->setDebugLoc(OrigIns->getDebugLoc());
2022}
2023
2024// Instrument unusual size or unusual alignment.
2025// We can not do it with a single check, so we do 1-byte check for the first
2026// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
2027// to report the actual access size.
2028void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2029 Instruction *I, Instruction *InsertBefore, Value *Addr,
2030 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
2031 uint32_t Exp, RuntimeCallInserter &RTCI) {
2032 InstrumentationIRBuilder IRB(InsertBefore);
2033 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
2034 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
2035
2036 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
2037 if (UseCalls) {
2038 if (Exp == 0)
2039 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2040 {AddrLong, Size});
2041 else
2042 RTCI.createRuntimeCall(
2043 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2044 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
2045 } else {
2046 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
2047 Value *LastByte = IRB.CreateIntToPtr(
2048 IRB.CreateAdd(AddrLong, SizeMinusOne),
2049 Addr->getType());
2050 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
2051 RTCI);
2052 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
2053 Exp, RTCI);
2054 }
2055}
2056
2057void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
2058 // Set up the arguments to our poison/unpoison functions.
2059 IRBuilder<> IRB(&GlobalInit.front(),
2060 GlobalInit.front().getFirstInsertionPt());
2061
2062 // Add a call to poison all external globals before the given function starts.
2063 Value *ModuleNameAddr =
2064 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
2065 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2066
2067 // Add calls to unpoison all globals before each return instruction.
2068 for (auto &BB : GlobalInit)
2070 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
2071}
2072
2073void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2074 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2075 if (!GV)
2076 return;
2077
2079 if (!CA)
2080 return;
2081
2082 for (Use &OP : CA->operands()) {
2083 if (isa<ConstantAggregateZero>(OP)) continue;
2085
2086 // Must have a function or null ptr.
2087 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2088 if (F->getName() == kAsanModuleCtorName) continue;
2089 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2090 // Don't instrument CTORs that will run before asan.module_ctor.
2091 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2092 continue;
2093 poisonOneInitializer(*F);
2094 }
2095 }
2096}
2097
2098const GlobalVariable *
2099ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2100 // In case this function should be expanded to include rules that do not just
2101 // apply when CompileKernel is true, either guard all existing rules with an
2102 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2103 // should also apply to user space.
2104 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2105
2106 const Constant *C = GA.getAliasee();
2107
2108 // When compiling the kernel, globals that are aliased by symbols prefixed
2109 // by "__" are special and cannot be padded with a redzone.
2110 if (GA.getName().starts_with("__"))
2111 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2112
2113 return nullptr;
2114}
2115
2116bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2117 Type *Ty = G->getValueType();
2118 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2119
2120 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2121 return false;
2122 if (!Ty->isSized()) return false;
2123 if (!G->hasInitializer()) return false;
2124 if (!isSupportedAddrspace(TargetTriple, G))
2125 return false;
2126 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2127 // Two problems with thread-locals:
2128 // - The address of the main thread's copy can't be computed at link-time.
2129 // - Need to poison all copies, not just the main thread's one.
2130 if (G->isThreadLocal()) return false;
2131 // For now, just ignore this Global if the alignment is large.
2132 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2133
2134 // For non-COFF targets, only instrument globals known to be defined by this
2135 // TU.
2136 // FIXME: We can instrument comdat globals on ELF if we are using the
2137 // GC-friendly metadata scheme.
2138 if (!TargetTriple.isOSBinFormatCOFF()) {
2139 if (!G->hasExactDefinition() || G->hasComdat())
2140 return false;
2141 } else {
2142 // On COFF, don't instrument non-ODR linkages.
2143 if (G->isInterposable())
2144 return false;
2145 // If the global has AvailableExternally linkage, then it is not in this
2146 // module, which means it does not need to be instrumented.
2147 if (G->hasAvailableExternallyLinkage())
2148 return false;
2149 }
2150
2151 // If a comdat is present, it must have a selection kind that implies ODR
2152 // semantics: no duplicates, any, or exact match.
2153 if (Comdat *C = G->getComdat()) {
2154 switch (C->getSelectionKind()) {
2155 case Comdat::Any:
2156 case Comdat::ExactMatch:
2158 break;
2159 case Comdat::Largest:
2160 case Comdat::SameSize:
2161 return false;
2162 }
2163 }
2164
2165 if (G->hasSection()) {
2166 // The kernel uses explicit sections for mostly special global variables
2167 // that we should not instrument. E.g. the kernel may rely on their layout
2168 // without redzones, or remove them at link time ("discard.*"), etc.
2169 if (CompileKernel)
2170 return false;
2171
2172 StringRef Section = G->getSection();
2173
2174 // Globals from llvm.metadata aren't emitted, do not instrument them.
2175 if (Section == "llvm.metadata") return false;
2176 // Do not instrument globals from special LLVM sections.
2177 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2178 return false;
2179
2180 // Do not instrument function pointers to initialization and termination
2181 // routines: dynamic linker will not properly handle redzones.
2182 if (Section.starts_with(".preinit_array") ||
2183 Section.starts_with(".init_array") ||
2184 Section.starts_with(".fini_array")) {
2185 return false;
2186 }
2187
2188 // Do not instrument user-defined sections (with names resembling
2189 // valid C identifiers)
2190 if (TargetTriple.isOSBinFormatELF()) {
2191 if (llvm::all_of(Section,
2192 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2193 return false;
2194 }
2195
2196 // On COFF, if the section name contains '$', it is highly likely that the
2197 // user is using section sorting to create an array of globals similar to
2198 // the way initialization callbacks are registered in .init_array and
2199 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2200 // to such globals is counterproductive, because the intent is that they
2201 // will form an array, and out-of-bounds accesses are expected.
2202 // See https://github.com/google/sanitizers/issues/305
2203 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2204 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2205 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2206 << *G << "\n");
2207 return false;
2208 }
2209
2210 if (TargetTriple.isOSBinFormatMachO()) {
2211 StringRef ParsedSegment, ParsedSection;
2212 unsigned TAA = 0, StubSize = 0;
2213 bool TAAParsed;
2215 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2216
2217 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2218 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2219 // them.
2220 if (ParsedSegment == "__OBJC" ||
2221 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2222 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2223 return false;
2224 }
2225 // See https://github.com/google/sanitizers/issues/32
2226 // Constant CFString instances are compiled in the following way:
2227 // -- the string buffer is emitted into
2228 // __TEXT,__cstring,cstring_literals
2229 // -- the constant NSConstantString structure referencing that buffer
2230 // is placed into __DATA,__cfstring
2231 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2232 // Moreover, it causes the linker to crash on OS X 10.7
2233 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2234 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2235 return false;
2236 }
2237 // The linker merges the contents of cstring_literals and removes the
2238 // trailing zeroes.
2239 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2240 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2241 return false;
2242 }
2243 }
2244 }
2245
2246 if (CompileKernel) {
2247 // Globals that prefixed by "__" are special and cannot be padded with a
2248 // redzone.
2249 if (G->getName().starts_with("__"))
2250 return false;
2251 }
2252
2253 return true;
2254}
2255
2256// On Mach-O platforms, we emit global metadata in a separate section of the
2257// binary in order to allow the linker to properly dead strip. This is only
2258// supported on recent versions of ld64.
2259bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2260 if (!TargetTriple.isOSBinFormatMachO())
2261 return false;
2262
2263 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2264 return true;
2265 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2266 return true;
2267 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2268 return true;
2269 if (TargetTriple.isDriverKit())
2270 return true;
2271 if (TargetTriple.isXROS())
2272 return true;
2273
2274 return false;
2275}
2276
2277StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2278 switch (TargetTriple.getObjectFormat()) {
2279 case Triple::COFF: return ".ASAN$GL";
2280 case Triple::ELF: return "asan_globals";
2281 case Triple::MachO: return "__DATA,__asan_globals,regular";
2282 case Triple::Wasm:
2283 case Triple::GOFF:
2284 case Triple::SPIRV:
2285 case Triple::XCOFF:
2288 "ModuleAddressSanitizer not implemented for object file format");
2290 break;
2291 }
2292 llvm_unreachable("unsupported object format");
2293}
2294
2295void ModuleAddressSanitizer::initializeCallbacks() {
2296 IRBuilder<> IRB(*C);
2297
2298 // Declare our poisoning and unpoisoning functions.
2299 AsanPoisonGlobals =
2300 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2301 AsanUnpoisonGlobals =
2302 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2303
2304 // Declare functions that register/unregister globals.
2305 AsanRegisterGlobals = M.getOrInsertFunction(
2306 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2307 AsanUnregisterGlobals = M.getOrInsertFunction(
2308 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2309
2310 // Declare the functions that find globals in a shared object and then invoke
2311 // the (un)register function on them.
2312 AsanRegisterImageGlobals = M.getOrInsertFunction(
2313 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2314 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2316
2317 AsanRegisterElfGlobals =
2318 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2319 IntptrTy, IntptrTy, IntptrTy);
2320 AsanUnregisterElfGlobals =
2321 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2322 IntptrTy, IntptrTy, IntptrTy);
2323}
2324
2325// Put the metadata and the instrumented global in the same group. This ensures
2326// that the metadata is discarded if the instrumented global is discarded.
2327void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2328 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2329 Module &M = *G->getParent();
2330 Comdat *C = G->getComdat();
2331 if (!C) {
2332 if (!G->hasName()) {
2333 // If G is unnamed, it must be internal. Give it an artificial name
2334 // so we can put it in a comdat.
2335 assert(G->hasLocalLinkage());
2336 G->setName(genName("anon_global"));
2337 }
2338
2339 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2340 std::string Name = std::string(G->getName());
2341 Name += InternalSuffix;
2342 C = M.getOrInsertComdat(Name);
2343 } else {
2344 C = M.getOrInsertComdat(G->getName());
2345 }
2346
2347 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2348 // linkage to internal linkage so that a symbol table entry is emitted. This
2349 // is necessary in order to create the comdat group.
2350 if (TargetTriple.isOSBinFormatCOFF()) {
2351 C->setSelectionKind(Comdat::NoDeduplicate);
2352 if (G->hasPrivateLinkage())
2353 G->setLinkage(GlobalValue::InternalLinkage);
2354 }
2355 G->setComdat(C);
2356 }
2357
2358 assert(G->hasComdat());
2359 Metadata->setComdat(G->getComdat());
2360}
2361
2362// Create a separate metadata global and put it in the appropriate ASan
2363// global registration section.
2365ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2366 StringRef OriginalName) {
2367 auto Linkage = TargetTriple.isOSBinFormatMachO()
2371 M, Initializer->getType(), false, Linkage, Initializer,
2372 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2373 Metadata->setSection(getGlobalMetadataSection());
2374 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2375 // relocation pressure.
2377 return Metadata;
2378}
2379
2380Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2381 AsanDtorFunction = Function::createWithDefaultAttr(
2384 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2385 // Ensure Dtor cannot be discarded, even if in a comdat.
2386 appendToUsed(M, {AsanDtorFunction});
2387 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2388
2389 return ReturnInst::Create(*C, AsanDtorBB);
2390}
2391
2392void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2393 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2394 ArrayRef<Constant *> MetadataInitializers) {
2395 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2396 auto &DL = M.getDataLayout();
2397
2398 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2399 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2400 Constant *Initializer = MetadataInitializers[i];
2401 GlobalVariable *G = ExtendedGlobals[i];
2402 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2403 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2404 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2405 MetadataGlobals[i] = Metadata;
2406
2407 // The MSVC linker always inserts padding when linking incrementally. We
2408 // cope with that by aligning each struct to its size, which must be a power
2409 // of two.
2410 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2411 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2412 "global metadata will not be padded appropriately");
2413 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2414
2415 SetComdatForGlobalMetadata(G, Metadata, "");
2416 }
2417
2418 // Update llvm.compiler.used, adding the new metadata globals. This is
2419 // needed so that during LTO these variables stay alive.
2420 if (!MetadataGlobals.empty())
2421 appendToCompilerUsed(M, MetadataGlobals);
2422}
2423
2424void ModuleAddressSanitizer::instrumentGlobalsELF(
2425 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2426 ArrayRef<Constant *> MetadataInitializers,
2427 const std::string &UniqueModuleId) {
2428 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2429
2430 // Putting globals in a comdat changes the semantic and potentially cause
2431 // false negative odr violations at link time. If odr indicators are used, we
2432 // keep the comdat sections, as link time odr violations will be detected on
2433 // the odr indicator symbols.
2434 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2435
2436 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2437 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2438 GlobalVariable *G = ExtendedGlobals[i];
2440 CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2441 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2442 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2443 MetadataGlobals[i] = Metadata;
2444
2445 if (UseComdatForGlobalsGC)
2446 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2447 }
2448
2449 // Update llvm.compiler.used, adding the new metadata globals. This is
2450 // needed so that during LTO these variables stay alive.
2451 if (!MetadataGlobals.empty())
2452 appendToCompilerUsed(M, MetadataGlobals);
2453
2454 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2455 // to look up the loaded image that contains it. Second, we can store in it
2456 // whether registration has already occurred, to prevent duplicate
2457 // registration.
2458 //
2459 // Common linkage ensures that there is only one global per shared library.
2460 GlobalVariable *RegisteredFlag = new GlobalVariable(
2461 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2462 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2464
2465 // Create start and stop symbols.
2466 GlobalVariable *StartELFMetadata = new GlobalVariable(
2467 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2468 "__start_" + getGlobalMetadataSection());
2470 GlobalVariable *StopELFMetadata = new GlobalVariable(
2471 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2472 "__stop_" + getGlobalMetadataSection());
2474
2475 // Create a call to register the globals with the runtime.
2476 if (ConstructorKind == AsanCtorKind::Global)
2477 IRB.CreateCall(AsanRegisterElfGlobals,
2478 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2479 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2480 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2481
2482 // We also need to unregister globals at the end, e.g., when a shared library
2483 // gets closed.
2484 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2485 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2486 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2487 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2488 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2489 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2490 }
2491}
2492
2493void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2494 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2495 ArrayRef<Constant *> MetadataInitializers) {
2496 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2497
2498 // On recent Mach-O platforms, use a structure which binds the liveness of
2499 // the global variable to the metadata struct. Keep the list of "Liveness" GV
2500 // created to be added to llvm.compiler.used
2501 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2502 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2503
2504 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2505 Constant *Initializer = MetadataInitializers[i];
2506 GlobalVariable *G = ExtendedGlobals[i];
2507 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2508
2509 // On recent Mach-O platforms, we emit the global metadata in a way that
2510 // allows the linker to properly strip dead globals.
2511 auto LivenessBinder =
2512 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2514 GlobalVariable *Liveness = new GlobalVariable(
2515 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2516 Twine("__asan_binder_") + G->getName());
2517 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2518 LivenessGlobals[i] = Liveness;
2519 }
2520
2521 // Update llvm.compiler.used, adding the new liveness globals. This is
2522 // needed so that during LTO these variables stay alive. The alternative
2523 // would be to have the linker handling the LTO symbols, but libLTO
2524 // current API does not expose access to the section for each symbol.
2525 if (!LivenessGlobals.empty())
2526 appendToCompilerUsed(M, LivenessGlobals);
2527
2528 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2529 // to look up the loaded image that contains it. Second, we can store in it
2530 // whether registration has already occurred, to prevent duplicate
2531 // registration.
2532 //
2533 // common linkage ensures that there is only one global per shared library.
2534 GlobalVariable *RegisteredFlag = new GlobalVariable(
2535 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2536 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2538
2539 if (ConstructorKind == AsanCtorKind::Global)
2540 IRB.CreateCall(AsanRegisterImageGlobals,
2541 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2542
2543 // We also need to unregister globals at the end, e.g., when a shared library
2544 // gets closed.
2545 if (DestructorKind != AsanDtorKind::None) {
2546 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2547 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2548 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2549 }
2550}
2551
2552void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2553 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2554 ArrayRef<Constant *> MetadataInitializers) {
2555 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2556 unsigned N = ExtendedGlobals.size();
2557 assert(N > 0);
2558
2559 // On platforms that don't have a custom metadata section, we emit an array
2560 // of global metadata structures.
2561 ArrayType *ArrayOfGlobalStructTy =
2562 ArrayType::get(MetadataInitializers[0]->getType(), N);
2563 auto AllGlobals = new GlobalVariable(
2564 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2565 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2566 if (Mapping.Scale > 3)
2567 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2568
2569 if (ConstructorKind == AsanCtorKind::Global)
2570 IRB.CreateCall(AsanRegisterGlobals,
2571 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2572 ConstantInt::get(IntptrTy, N)});
2573
2574 // We also need to unregister globals at the end, e.g., when a shared library
2575 // gets closed.
2576 if (DestructorKind != AsanDtorKind::None) {
2577 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2578 IrbDtor.CreateCall(AsanUnregisterGlobals,
2579 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2580 ConstantInt::get(IntptrTy, N)});
2581 }
2582}
2583
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
// Sets *CtorComdat to true if the global registration code emitted into the
// asan constructor is comdat-compatible.
void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
                                               bool *CtorComdat) {
  // Build set of globals that are aliased by some GA, where
  // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
  SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
  if (CompileKernel) {
    for (auto &GA : M.aliases()) {
      if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
        AliasedGlobalExclusions.insert(GV);
    }
  }

  // Collect every global that passes the shouldInstrumentGlobal() filter and
  // is not excluded via an alias above.
  SmallVector<GlobalVariable *, 16> GlobalsToChange;
  for (auto &G : M.globals()) {
    if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
      GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  auto &DL = M.getDataLayout();

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   size_t padding_for_windows_msvc_incremental_link;
  //   size_t odr_indicator;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, IntptrTy);
  SmallVector<Constant *, 16> Initializers(n);

  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = GlobalsToChange[i];

    if (G->hasSanitizerMetadata())
      MD = G->getSanitizerMetadata();

    // The runtime library tries demangling symbol names in the descriptor but
    // functionality like __cxa_demangle may be unavailable (e.g.
    // -static-libstdc++). So we demangle the symbol names here.
    std::string NameForGlobal = G->getName().str();
                                         /*AllowMerging*/ true, genName("global"));

    Type *Ty = G->getValueType();
    const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
    const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    // NewTy is { original payload, right redzone bytes }; the payload stays
    // at offset zero so addresses and metadata remain valid.
    StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
    GlobalVariable *NewGlobal = new GlobalVariable(
        M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
        G->getThreadLocalMode(), G->getAddressSpace());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setComdat(G->getComdat());
    NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
    // Don't fold globals with redzones. ODR violation detector and redzone
    // poisoning implicitly creates a dependence on the global's address, so it
    // is no longer valid for it to be marked unnamed_addr.

    // Move null-terminated C strings to "__asan_cstring" section on Darwin.
    if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
        G->isConstant()) {
      auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
      if (Seq && Seq->isCString())
        NewGlobal->setSection("__TEXT,__asan_cstring,regular");
    }

    // Transfer the debug info and type metadata. The payload starts at offset
    // zero so we can copy the metadata over as is.
    NewGlobal->copyMetadata(G, 0);

    // Splice the padded global in place of the original and delete the old
    // definition.
    G->replaceAllUsesWith(NewGlobal);
    NewGlobal->takeName(G);
    G->eraseFromParent();
    NewGlobals[i] = NewGlobal;

    Constant *ODRIndicator = Constant::getNullValue(IntptrTy);
    GlobalValue *InstrumentedGlobal = NewGlobal;

    bool CanUsePrivateAliases =
        TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
        TargetTriple.isOSBinFormatWasm();
    if (CanUsePrivateAliases && UsePrivateAlias) {
      // Create local alias for NewGlobal to avoid crash on ODR between
      // instrumented and non-instrumented libraries.
      InstrumentedGlobal =
    }

    // ODR should not happen for local linkage.
    if (NewGlobal->hasLocalLinkage()) {
      ODRIndicator = ConstantInt::getAllOnesValue(IntptrTy);
    } else if (UseOdrIndicator) {
      // With local aliases, we need to provide another externally visible
      // symbol __odr_asan_XXX to detect ODR violation.
      auto *ODRIndicatorSym =
          new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
                             kODRGenPrefix + NameForGlobal, nullptr,
                             NewGlobal->getThreadLocalMode());

      // Set meaningful attributes for indicator symbol.
      ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
      ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
      ODRIndicatorSym->setAlignment(Align(1));
      ODRIndicator = ConstantExpr::getPtrToInt(ODRIndicatorSym, IntptrTy);
    }

    // Fill in the runtime descriptor struct laid out as GlobalStructTy above.
    Constant *Initializer = ConstantStruct::get(
        GlobalStructTy,
        ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit),
        Constant::getNullValue(IntptrTy), ODRIndicator);

    LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");

    Initializers[i] = Initializer;
  }

  // Add instrumented globals to llvm.compiler.used list to avoid LTO from
  // ConstantMerge'ing them.
  SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = NewGlobals[i];
    if (G->getName().empty()) continue;
    GlobalsToAddToUsedList.push_back(G);
  }
  appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));

  if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
    // Use COMDAT and register globals even if n == 0 to ensure that (a) the
    // linkage unit will only have one module constructor, and (b) the register
    // function will be called. The module destructor is not created when n ==
    // 0.
    *CtorComdat = true;
    instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
  } else if (n == 0) {
    // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
    // all compile units will have identical module constructor/destructor.
    *CtorComdat = TargetTriple.isOSBinFormatELF();
  } else {
    *CtorComdat = false;
    if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
      InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
    } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
      InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
    } else {
      InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
    }
  }

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (ClInitializers)
    createInitializerPoisonCalls();

  LLVM_DEBUG(dbgs() << M);
}
2767
2768uint64_t
2769ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2770 constexpr uint64_t kMaxRZ = 1 << 18;
2771 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2772
2773 uint64_t RZ = 0;
2774 if (SizeInBytes <= MinRZ / 2) {
2775 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2776 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2777 // half of MinRZ.
2778 RZ = MinRZ - SizeInBytes;
2779 } else {
2780 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2781 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2782
2783 // Round up to multiple of MinRZ.
2784 if (SizeInBytes % MinRZ)
2785 RZ += MinRZ - (SizeInBytes % MinRZ);
2786 }
2787
2788 assert((RZ + SizeInBytes) % MinRZ == 0);
2789
2790 return RZ;
2791}
2792
2793int ModuleAddressSanitizer::GetAsanVersion() const {
2794 int LongSize = M.getDataLayout().getPointerSizeInBits();
2795 bool isAndroid = M.getTargetTriple().isAndroid();
2796 int Version = 8;
2797 // 32-bit Android is one version ahead because of the switch to dynamic
2798 // shadow.
2799 Version += (LongSize == 32 && isAndroid);
2800 return Version;
2801}
2802
2803GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2804 if (!ModuleName) {
2805 // We shouldn't merge same module names, as this string serves as unique
2806 // module ID in runtime.
2807 ModuleName =
2808 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2809 /*AllowMerging*/ false, genName("module"));
2810 }
2811 return ModuleName;
2812}
2813
// Module-level entry point: strips ASan-incompatible attributes, emits the
// module ctor (and lazily a dtor), instruments globals, and registers the
// ctor/dtor with llvm.global_ctors/dtors. Always reports the module modified.
bool ModuleAddressSanitizer::instrumentModule() {
  initializeCallbacks();

  for (Function &F : M)
    removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);

  // Create a module constructor. A destructor is created lazily because not all
  // platforms, and not all modules need it.
  if (ConstructorKind == AsanCtorKind::Global) {
    if (CompileKernel) {
      // The kernel always builds with its own runtime, and therefore does not
      // need the init and version check calls.
      AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
    } else {
      // An empty VersionCheckName suppresses the version-check call.
      std::string AsanVersion = std::to_string(GetAsanVersion());
      std::string VersionCheckName =
          InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
      std::tie(AsanCtorFunction, std::ignore) =
              M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
              /*InitArgs=*/{}, VersionCheckName);
    }
  }

  // CtorComdat is set by instrumentGlobals() when the emitted registration
  // code is comdat-compatible.
  bool CtorComdat = true;
  if (ClGlobals) {
    assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
    if (AsanCtorFunction) {
      IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
      instrumentGlobals(IRB, &CtorComdat);
    } else {
      IRBuilder<> IRB(*C);
      instrumentGlobals(IRB, &CtorComdat);
    }
  }

  const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific
  // (2) target is ELF.
  if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
    if (AsanCtorFunction) {
      AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
      appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
    }
    if (AsanDtorFunction) {
      AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
      appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
    }
  } else {
    if (AsanCtorFunction)
      appendToGlobalCtors(M, AsanCtorFunction, Priority);
    if (AsanDtorFunction)
      appendToGlobalDtors(M, AsanDtorFunction, Priority);
  }

  return true;
}
2873
// Declares (getOrInsertFunction) every runtime entry point the function-level
// instrumentation may call: report/access callbacks for each
// {write, exp, size} combination, mem* interceptors, no-return hook, and
// pointer-compare/subtract checkers.
void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const std::string EndingStr = Recover ? "_noabort" : "";

      // Args2: (addr, size) for the *_n variants; Args1: (addr) for the
      // fixed-size variants. Both grow an i32 experiment arg when Exp is set.
      SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
      SmallVector<Type *, 2> Args1{1, IntptrTy};
      AttributeList AL2;
      AttributeList AL1;
      if (Exp) {
        Type *ExpType = Type::getInt32Ty(*C);
        Args2.push_back(ExpType);
        Args1.push_back(ExpType);
        // Propagate the target's sign/zero-extension attribute for i32 params.
        if (auto AK = TLI->getExtAttrForI32Param(false)) {
          AL2 = AL2.addParamAttribute(*C, 2, AK);
          AL1 = AL1.addParamAttribute(*C, 1, AK);
        }
      }
      AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      // One callback per power-of-two access size (1, 2, 4, ...).
      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);

        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
      }
    }
  }

  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                      PtrTy, PtrTy, PtrTy, IntptrTy);
  AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
                                     PtrTy, PtrTy, IntptrTy);
  AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                     TLI->getAttrList(C, {1}, /*Signed=*/false),
                                     PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  AsanHandleNoReturnFunc =
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());

  AsanPtrCmpFunction =
      M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanPtrSubFunction =
      M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
  if (Mapping.InGlobal)
    AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
                                           ArrayType::get(IRB.getInt8Ty(), 0));

  AMDGPUAddressShared =
      M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
  AMDGPUAddressPrivate =
      M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
}
2949
2950bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2951 // For each NSObject descendant having a +load method, this method is invoked
2952 // by the ObjC runtime before any of the static constructors is called.
2953 // Therefore we need to instrument such methods with a call to __asan_init
2954 // at the beginning in order to initialize our runtime before any access to
2955 // the shadow memory.
2956 // We cannot just ignore these methods, because they may call other
2957 // instrumented functions.
2958 if (F.getName().contains(" load]")) {
2959 FunctionCallee AsanInitFunction =
2960 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2961 IRBuilder<> IRB(&F.front(), F.front().begin());
2962 IRB.CreateCall(AsanInitFunction, {});
2963 return true;
2964 }
2965 return false;
2966}
2967
// Materializes the dynamic shadow base at function entry when the mapping
// uses a runtime-determined offset. Returns true iff any IR was inserted.
bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return false;

  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
      // An empty inline asm with input reg == output reg.
      // An opaque pointer-to-int cast, basically.
          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
          StringRef(""), StringRef("=r,0"),
          /*hasSideEffects=*/false);
      LocalDynamicShadow =
          IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
    } else {
      LocalDynamicShadow =
          IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
    }
  } else {
    // Otherwise the runtime publishes the shadow base through a well-known
    // global; load it once per function.
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  }
  return true;
}
2995
// Pre-marks allocas escaped through llvm.localescape as not-interesting so
// they are never poisoned/relocated by the stack instrumentation.
void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
  // Find the one possible call to llvm.localescape and pre-mark allocas passed
  // to it as uninteresting. This assumes we haven't started processing allocas
  // yet. This check is done up front because iterating the use list in
  // isInterestingAlloca would be algorithmically slower.
  assert(ProcessedAllocas.empty() && "must process localescape before allocas");

  // Try to get the declaration of llvm.localescape. If it's not in the module,
  // we can exit early.
  if (!F.getParent()->getFunction("llvm.localescape")) return;

  // Look for a call to llvm.localescape call in the entry block. It can't be in
  // any other block.
  for (Instruction &I : F.getEntryBlock()) {
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      // We found a call. Mark all the allocas passed in as uninteresting.
      for (Value *Arg : II->args()) {
        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
        assert(AI && AI->isStaticAlloca() &&
               "non-static alloca arg to localescape");
        // false == "seen, but do not instrument".
        ProcessedAllocas[AI] = false;
      }
      // At most one localescape call exists per function, so stop here.
      break;
    }
  }
}
3023// Mitigation for https://github.com/google/sanitizers/issues/749
3024// We don't instrument Windows catch-block parameters to avoid
3025// interfering with exception handling assumptions.
3026void AddressSanitizer::markCatchParametersAsUninteresting(Function &F) {
3027 for (BasicBlock &BB : F) {
3028 for (Instruction &I : BB) {
3029 if (auto *CatchPad = dyn_cast<CatchPadInst>(&I)) {
3030 // Mark the parameters to a catch-block as uninteresting to avoid
3031 // instrumenting them.
3032 for (Value *Operand : CatchPad->arg_operands())
3033 if (auto *AI = dyn_cast<AllocaInst>(Operand))
3034 ProcessedAllocas[AI] = false;
3035 }
3036 }
3037 }
3038}
3039
3040bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
3041 bool ShouldInstrument =
3042 ClDebugMin < 0 || ClDebugMax < 0 ||
3043 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
3044 Instrumented++;
3045 return !ShouldInstrument;
3046}
3047
// Per-function driver: collects interesting memory operands, mem intrinsics,
// pointer comparisons/subtractions and no-return calls, then instruments them
// and runs the stack poisoner. Returns true iff the function was modified.
bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI,
                                          const TargetTransformInfo *TTI) {
  bool FunctionModified = false;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return FunctionModified;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(TLI);

  FunctionStateRAII CleanupObj(this);

  RuntimeCallInserter RTCI(F);

  FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static allocas
  // can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  if (TargetTriple.isOSWindows())
    markCatchParametersAsUninteresting(F);

  // We want to instrument every address only once per basic block (unless there
  // are calls between uses).
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      // See LooksLikeCodeInBug11395: bail out of the whole function.
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);

      if (!InterestingOperands.empty()) {
        for (auto &Operand : InterestingOperands) {
          if (ClOpt && ClOptSameTemp) {
            Value *Ptr = Operand.getPtr();
            // If we have a mask, skip instrumentation if we've already
            // instrumented the full object. But don't add to TempsToInstrument
            // because we might get another load/store with a different mask.
            if (Operand.MaybeMask) {
              if (TempsToInstrument.count(Ptr))
                continue; // We've seen this (whole) temp in the current BB.
            } else {
              if (!TempsToInstrument.insert(Ptr).second)
                continue; // We've seen this temp in the current BB.
            }
          }
          OperandsToInstrument.push_back(Operand);
          NumInsnsPerBB++;
        }
      } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
        PointerComparisonsOrSubtracts.push_back(&Inst);
      } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
        // ok, take it.
        IntrinToInstrument.push_back(MI);
        NumInsnsPerBB++;
      } else {
        if (auto *CB = dyn_cast<CallBase>(&Inst)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CB->doesNotReturn())
            NoReturnCalls.push_back(CB);
        }
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
      }
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  // Above the threshold, emit out-of-line callback calls instead of inline
  // shadow checks to limit code-size growth.
  bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
                   OperandsToInstrument.size() + IntrinToInstrument.size() >
                       (unsigned)InstrumentationWithCallsThreshold);
  const DataLayout &DL = F.getDataLayout();
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());

  // Instrument.
  int NumInstrumented = 0;
  for (auto &Operand : OperandsToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMop(ObjSizeVis, Operand, UseCalls,
                    F.getDataLayout(), RTCI);
    FunctionModified = true;
  }
  for (auto *Inst : IntrinToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMemIntrinsic(Inst, RTCI);
    FunctionModified = true;
  }

  FunctionStackPoisoner FSP(F, *this, RTCI);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto *CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
  }

  for (auto *Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst, RTCI);
    FunctionModified = true;
  }

  if (ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}
3192
// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once the bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  // Only 32-bit targets are affected.
  if (LongSize != 32) return false;
  if (!CI || !CI->isInlineAsm()) return false;
  // Small inline-asm calls are fine; only many-operand blobs trigger the bug.
  if (CI->arg_size() <= 5)
    return false;
  // We have inline assembly with quite a few arguments.
  return true;
}
3205
// Declares the runtime entry points used by stack poisoning: fake-stack
// malloc/free per size class, use-after-scope (un)poisoning, the
// __asan_set_shadow_* helpers, and dynamic-alloca (un)poisoning.
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
    // One malloc/free pair per fake-stack size class.
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  // Shadow-fill helpers, one per shadow byte value used by stack frames
  // (two-digit lowercase hex suffix).
  for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
                     0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
3244
// Emits inline stores that write ShadowBytes[Begin..End) at ShadowBase+i for
// every position where ShadowMask is set, coalescing neighbors into the
// widest store (up to 8 bytes / pointer width) that fits.
void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getDataLayout().isLittleEndian();

  // Poison given range in shadow using larges store size with out leading and
  // trailing zeros in ShadowMask. Zeros never change, so they need neither
  // poisoning nor up-poisoning. Still we don't mind if some of them get into a
  // middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    // Assemble the store payload in target byte order.
    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
        Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
        Align(1));

    i += StoreSizeInBytes;
  }
}
3297
3298void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3299 ArrayRef<uint8_t> ShadowBytes,
3300 IRBuilder<> &IRB, Value *ShadowBase) {
3301 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3302}
3303
// Writes ShadowBytes[Begin..End) into shadow memory at ShadowBase. Long runs
// of one repeated shadow value become a single __asan_set_shadow_* runtime
// call (when one exists for that value and the run is at least
// MaxInlinePoisoningSize); everything in between is emitted inline via
// copyToShadowInline.
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  // Done tracks how far inline emission has caught up; [Done, i) is flushed
  // inline before each runtime call so byte order in shadow is preserved.
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    // No runtime helper for this value: leave it to the inline path.
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ASan.MaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      RTCI.createRuntimeCall(
          IRB, AsanSetShadowFunc[Val],
          {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
           ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  // Flush whatever remains after the last runtime call.
  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
3335
3336// Fake stack allocator (asan_fake_stack.h) has 11 size classes
3337// for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
3338static int StackMallocSizeClass(uint64_t LocalStackSize) {
3339 assert(LocalStackSize <= kMaxStackMallocSize);
3340 uint64_t MaxSize = kMinStackMallocSize;
3341 for (int i = 0;; i++, MaxSize *= 2)
3342 if (LocalStackSize <= MaxSize) return i;
3343 llvm_unreachable("impossible LocalStackSize");
3344}
3345
3346void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3347 Instruction *CopyInsertPoint = &F.front().front();
3348 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3349 // Insert after the dynamic shadow location is determined
3350 CopyInsertPoint = CopyInsertPoint->getNextNode();
3351 assert(CopyInsertPoint);
3352 }
3353 IRBuilder<> IRB(CopyInsertPoint);
3354 const DataLayout &DL = F.getDataLayout();
3355 for (Argument &Arg : F.args()) {
3356 if (Arg.hasByValAttr()) {
3357 Type *Ty = Arg.getParamByValType();
3358 const Align Alignment =
3359 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3360
3361 AllocaInst *AI = IRB.CreateAlloca(
3362 Ty, nullptr,
3363 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3364 ".byval");
3365 AI->setAlignment(Alignment);
3366 Arg.replaceAllUsesWith(AI);
3367
3368 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3369 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3370 }
3371 }
3372}
3373
3374PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3375 Value *ValueIfTrue,
3376 Instruction *ThenTerm,
3377 Value *ValueIfFalse) {
3378 PHINode *PHI = IRB.CreatePHI(ValueIfTrue->getType(), 2);
3379 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3380 PHI->addIncoming(ValueIfFalse, CondBlock);
3381 BasicBlock *ThenBlock = ThenTerm->getParent();
3382 PHI->addIncoming(ValueIfTrue, ThenBlock);
3383 return PHI;
3384}
3385
3386Value *FunctionStackPoisoner::createAllocaForLayout(
3387 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3388 AllocaInst *Alloca;
3389 if (Dynamic) {
3390 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3391 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3392 "MyAlloca");
3393 } else {
3394 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3395 nullptr, "MyAlloca");
3396 assert(Alloca->isStaticAlloca());
3397 }
3398 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3399 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3400 Alloca->setAlignment(Align(FrameAlignment));
3401 return Alloca;
3402}
3403
3404void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3405 BasicBlock &FirstBB = *F.begin();
3406 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3407 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3408 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3409 DynamicAllocaLayout->setAlignment(Align(32));
3410}
3411
3412void FunctionStackPoisoner::processDynamicAllocas() {
3413 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3414 assert(DynamicAllocaPoisonCallVec.empty());
3415 return;
3416 }
3417
3418 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3419 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3420 assert(APC.InsBefore);
3421 assert(APC.AI);
3422 assert(ASan.isInterestingAlloca(*APC.AI));
3423 assert(!APC.AI->isStaticAlloca());
3424
3425 IRBuilder<> IRB(APC.InsBefore);
3426 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3427 // Dynamic allocas will be unpoisoned unconditionally below in
3428 // unpoisonDynamicAllocas.
3429 // Flag that we need unpoison static allocas.
3430 }
3431
3432 // Handle dynamic allocas.
3433 createDynamicAllocasInitStorage();
3434 for (auto &AI : DynamicAllocaVec)
3435 handleDynamicAllocaCall(AI);
3436 unpoisonDynamicAllocas();
3437}
3438
3439/// Collect instructions in the entry block after \p InsBefore which initialize
3440/// permanent storage for a function argument. These instructions must remain in
3441/// the entry block so that uninitialized values do not appear in backtraces. An
3442/// added benefit is that this conserves spill slots. This does not move stores
3443/// before instrumented / "interesting" allocas.
3445 AddressSanitizer &ASan, Instruction &InsBefore,
3446 SmallVectorImpl<Instruction *> &InitInsts) {
3447 Instruction *Start = InsBefore.getNextNode();
3448 for (Instruction *It = Start; It; It = It->getNextNode()) {
3449 // Argument initialization looks like:
3450 // 1) store <Argument>, <Alloca> OR
3451 // 2) <CastArgument> = cast <Argument> to ...
3452 // store <CastArgument> to <Alloca>
3453 // Do not consider any other kind of instruction.
3454 //
3455 // Note: This covers all known cases, but may not be exhaustive. An
3456 // alternative to pattern-matching stores is to DFS over all Argument uses:
3457 // this might be more general, but is probably much more complicated.
3458 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3459 continue;
3460 if (auto *Store = dyn_cast<StoreInst>(It)) {
3461 // The store destination must be an alloca that isn't interesting for
3462 // ASan to instrument. These are moved up before InsBefore, and they're
3463 // not interesting because allocas for arguments can be mem2reg'd.
3464 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3465 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3466 continue;
3467
3468 Value *Val = Store->getValueOperand();
3469 bool IsDirectArgInit = isa<Argument>(Val);
3470 bool IsArgInitViaCast =
3471 isa<CastInst>(Val) &&
3472 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3473 // Check that the cast appears directly before the store. Otherwise
3474 // moving the cast before InsBefore may break the IR.
3475 Val == It->getPrevNode();
3476 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3477 if (!IsArgInit)
3478 continue;
3479
3480 if (IsArgInitViaCast)
3481 InitInsts.push_back(cast<Instruction>(Val));
3482 InitInsts.push_back(Store);
3483 continue;
3484 }
3485
3486 // Do not reorder past unknown instructions: argument initialization should
3487 // only involve casts and stores.
3488 return;
3489 }
3490}
3491
3493 // Alloca could have been renamed for uniqueness. Its true name will have been
3494 // recorded as an annotation.
3495 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3496 MDTuple *AllocaAnnotations =
3497 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3498 for (auto &Annotation : AllocaAnnotations->operands()) {
3499 if (!isa<MDTuple>(Annotation))
3500 continue;
3501 auto AnnotationTuple = cast<MDTuple>(Annotation);
3502 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3503 Index++) {
3504 // All annotations are strings
3505 auto MetadataString =
3506 cast<MDString>(AnnotationTuple->getOperand(Index));
3507 if (MetadataString->getString() == "alloca_name_altered")
3508 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3509 ->getString();
3510 }
3511 }
3512 }
3513 return AI->getName();
3514}
3515
3516void FunctionStackPoisoner::processStaticAllocas() {
3517 if (AllocaVec.empty()) {
3518 assert(StaticAllocaPoisonCallVec.empty());
3519 return;
3520 }
3521
3522 int StackMallocIdx = -1;
3523 DebugLoc EntryDebugLocation;
3524 if (auto SP = F.getSubprogram())
3525 EntryDebugLocation =
3526 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3527
3528 Instruction *InsBefore = AllocaVec[0];
3529 IRBuilder<> IRB(InsBefore);
3530
3531 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3532 // debug info is broken, because only entry-block allocas are treated as
3533 // regular stack slots.
3534 auto InsBeforeB = InsBefore->getParent();
3535 assert(InsBeforeB == &F.getEntryBlock());
3536 for (auto *AI : StaticAllocasToMoveUp)
3537 if (AI->getParent() == InsBeforeB)
3538 AI->moveBefore(InsBefore->getIterator());
3539
3540 // Move stores of arguments into entry-block allocas as well. This prevents
3541 // extra stack slots from being generated (to house the argument values until
3542 // they can be stored into the allocas). This also prevents uninitialized
3543 // values from being shown in backtraces.
3544 SmallVector<Instruction *, 8> ArgInitInsts;
3545 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3546 for (Instruction *ArgInitInst : ArgInitInsts)
3547 ArgInitInst->moveBefore(InsBefore->getIterator());
3548
3549 // If we have a call to llvm.localescape, keep it in the entry block.
3550 if (LocalEscapeCall)
3551 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3552
3554 SVD.reserve(AllocaVec.size());
3555 for (AllocaInst *AI : AllocaVec) {
3558 ASan.getAllocaSizeInBytes(*AI),
3559 0,
3560 AI->getAlign().value(),
3561 AI,
3562 0,
3563 0};
3564 SVD.push_back(D);
3565 }
3566
3567 // Minimal header size (left redzone) is 4 pointers,
3568 // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms.
3569 uint64_t Granularity = 1ULL << Mapping.Scale;
3570 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3571 const ASanStackFrameLayout &L =
3572 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3573
3574 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3576 for (auto &Desc : SVD)
3577 AllocaToSVDMap[Desc.AI] = &Desc;
3578
3579 // Update SVD with information from lifetime intrinsics.
3580 for (const auto &APC : StaticAllocaPoisonCallVec) {
3581 assert(APC.InsBefore);
3582 assert(APC.AI);
3583 assert(ASan.isInterestingAlloca(*APC.AI));
3584 assert(APC.AI->isStaticAlloca());
3585
3586 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3587 Desc.LifetimeSize = Desc.Size;
3588 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3589 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3590 if (LifetimeLoc->getFile() == FnLoc->getFile())
3591 if (unsigned Line = LifetimeLoc->getLine())
3592 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3593 }
3594 }
3595 }
3596
3597 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3598 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3599 uint64_t LocalStackSize = L.FrameSize;
3600 bool DoStackMalloc =
3601 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3602 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3603 bool DoDynamicAlloca = ClDynamicAllocaStack;
3604 // Don't do dynamic alloca or stack malloc if:
3605 // 1) There is inline asm: too often it makes assumptions on which registers
3606 // are available.
3607 // 2) There is a returns_twice call (typically setjmp), which is
3608 // optimization-hostile, and doesn't play well with introduced indirect
3609 // register-relative calculation of local variable addresses.
3610 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3611 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3612
3613 Type *PtrTy = F.getDataLayout().getAllocaPtrType(F.getContext());
3614 Value *StaticAlloca =
3615 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3616
3617 Value *FakeStackPtr;
3618 Value *FakeStackInt;
3619 Value *LocalStackBase;
3620 Value *LocalStackBaseAlloca;
3621 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3622
3623 if (DoStackMalloc) {
3624 LocalStackBaseAlloca =
3625 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3626 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3627 // void *FakeStack = __asan_option_detect_stack_use_after_return
3628 // ? __asan_stack_malloc_N(LocalStackSize)
3629 // : nullptr;
3630 // void *LocalStackBase = (FakeStack) ? FakeStack :
3631 // alloca(LocalStackSize);
3632 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3634 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3635 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3637 Instruction *Term =
3638 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3639 IRBuilder<> IRBIf(Term);
3640 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3641 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3642 Value *FakeStackValue =
3643 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3644 ConstantInt::get(IntptrTy, LocalStackSize));
3645 IRB.SetInsertPoint(InsBefore);
3646 FakeStackInt = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue,
3647 Term, ConstantInt::get(IntptrTy, 0));
3648 } else {
3649 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode:Always)
3650 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3651 // void *LocalStackBase = (FakeStack) ? FakeStack :
3652 // alloca(LocalStackSize);
3653 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3654 FakeStackInt =
3655 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3656 ConstantInt::get(IntptrTy, LocalStackSize));
3657 }
3658 FakeStackPtr = IRB.CreateIntToPtr(FakeStackInt, PtrTy);
3659 Value *NoFakeStack =
3660 IRB.CreateICmpEQ(FakeStackInt, Constant::getNullValue(IntptrTy));
3661 Instruction *Term =
3662 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3663 IRBuilder<> IRBIf(Term);
3664 Value *AllocaValue =
3665 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3666
3667 IRB.SetInsertPoint(InsBefore);
3668 LocalStackBase =
3669 createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStackPtr);
3670 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3671 DIExprFlags |= DIExpression::DerefBefore;
3672 } else {
3673 // void *FakeStack = nullptr;
3674 // void *LocalStackBase = alloca(LocalStackSize);
3675 FakeStackInt = Constant::getNullValue(IntptrTy);
3676 FakeStackPtr = Constant::getNullValue(PtrTy);
3677 LocalStackBase =
3678 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3679 LocalStackBaseAlloca = LocalStackBase;
3680 }
3681
3682 // Replace Alloca instructions with base+offset.
3683 SmallVector<Value *> NewAllocaPtrs;
3684 for (const auto &Desc : SVD) {
3685 AllocaInst *AI = Desc.AI;
3686 replaceDbgDeclare(AI, LocalStackBaseAlloca, DIB, DIExprFlags, Desc.Offset);
3687 Value *NewAllocaPtr = IRB.CreatePtrAdd(
3688 LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset));
3689 AI->replaceAllUsesWith(NewAllocaPtr);
3690 NewAllocaPtrs.push_back(NewAllocaPtr);
3691 }
3692
3693 // The left-most redzone has enough space for at least 4 pointers.
3694 // Write the Magic value to redzone[0].
3695 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3696 LocalStackBase);
3697 // Write the frame description constant to redzone[1].
3698 Value *BasePlus1 = IRB.CreatePtrAdd(
3699 LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize / 8));
3700 GlobalVariable *StackDescriptionGlobal =
3701 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3702 /*AllowMerging*/ true, genName("stack"));
3703 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3704 IRB.CreateStore(Description, BasePlus1);
3705 // Write the PC to redzone[2].
3706 Value *BasePlus2 = IRB.CreatePtrAdd(
3707 LocalStackBase, ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8));
3708 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3709
3710 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3711
3712 // Poison the stack red zones at the entry.
3713 Value *ShadowBase =
3714 ASan.memToShadow(IRB.CreatePtrToInt(LocalStackBase, IntptrTy), IRB);
3715 // As mask we must use most poisoned case: red zones and after scope.
3716 // As bytes we can use either the same or just red zones only.
3717 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3718
3719 if (!StaticAllocaPoisonCallVec.empty()) {
3720 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3721
3722 // Poison static allocas near lifetime intrinsics.
3723 for (const auto &APC : StaticAllocaPoisonCallVec) {
3724 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3725 assert(Desc.Offset % L.Granularity == 0);
3726 size_t Begin = Desc.Offset / L.Granularity;
3727 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3728
3729 IRBuilder<> IRB(APC.InsBefore);
3730 copyToShadow(ShadowAfterScope,
3731 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3732 IRB, ShadowBase);
3733 }
3734 }
3735
3736 // Remove lifetime markers now that these are no longer allocas.
3737 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3738 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3739 auto *I = cast<Instruction>(U);
3740 if (I->isLifetimeStartOrEnd())
3741 I->eraseFromParent();
3742 }
3743 }
3744
3745 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3746 SmallVector<uint8_t, 64> ShadowAfterReturn;
3747
3748 // (Un)poison the stack before all ret instructions.
3749 for (Instruction *Ret : RetVec) {
3750 IRBuilder<> IRBRet(Ret);
3751 // Mark the current frame as retired.
3752 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3753 LocalStackBase);
3754 if (DoStackMalloc) {
3755 assert(StackMallocIdx >= 0);
3756 // if FakeStack != 0 // LocalStackBase == FakeStack
3757 // // In use-after-return mode, poison the whole stack frame.
3758 // if StackMallocIdx <= 4
3759 // // For small sizes inline the whole thing:
3760 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3761 // **SavedFlagPtr(FakeStack) = 0
3762 // else
3763 // __asan_stack_free_N(FakeStack, LocalStackSize)
3764 // else
3765 // <This is not a fake stack; unpoison the redzones>
3766 Value *Cmp =
3767 IRBRet.CreateICmpNE(FakeStackInt, Constant::getNullValue(IntptrTy));
3768 Instruction *ThenTerm, *ElseTerm;
3769 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3770
3771 IRBuilder<> IRBPoison(ThenTerm);
3772 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3773 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3774 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3776 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3777 ShadowBase);
3778 Value *SavedFlagPtrPtr = IRBPoison.CreatePtrAdd(
3779 FakeStackPtr,
3780 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3781 Value *SavedFlagPtr = IRBPoison.CreateLoad(IntptrTy, SavedFlagPtrPtr);
3782 IRBPoison.CreateStore(
3783 Constant::getNullValue(IRBPoison.getInt8Ty()),
3784 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3785 } else {
3786 // For larger frames call __asan_stack_free_*.
3787 RTCI.createRuntimeCall(
3788 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3789 {FakeStackInt, ConstantInt::get(IntptrTy, LocalStackSize)});
3790 }
3791
3792 IRBuilder<> IRBElse(ElseTerm);
3793 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3794 } else {
3795 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3796 }
3797 }
3798
3799 // We are done. Remove the old unused alloca instructions.
3800 for (auto *AI : AllocaVec)
3801 AI->eraseFromParent();
3802}
3803
3804void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3805 IRBuilder<> &IRB, bool DoPoison) {
3806 // For now just insert the call to ASan runtime.
3807 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3808 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3809 RTCI.createRuntimeCall(
3810 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3811 {AddrArg, SizeArg});
3812}
3813
3814// Handling llvm.lifetime intrinsics for a given %alloca:
3815// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3816// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3817// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3818// could be poisoned by previous llvm.lifetime.end instruction, as the
3819// variable may go in and out of scope several times, e.g. in loops).
3820// (3) if we poisoned at least one %alloca in a function,
3821// unpoison the whole stack frame at function exit.
3822void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3823 IRBuilder<> IRB(AI);
3824
3825 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3826 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3827
3828 Value *Zero = Constant::getNullValue(IntptrTy);
3829 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3830 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3831
3832 // Since we need to extend alloca with additional memory to locate
3833 // redzones, and OldSize is number of allocated blocks with
3834 // ElementSize size, get allocated memory size in bytes by
3835 // OldSize * ElementSize.
3836 Value *OldSize = IRB.CreateAllocationSize(IntptrTy, AI);
3837
3838 // PartialSize = OldSize % 32
3839 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3840
3841 // Misalign = kAllocaRzSize - PartialSize;
3842 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3843
3844 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3845 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3846 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3847
3848 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3849 // Alignment is added to locate left redzone, PartialPadding for possible
3850 // partial redzone and kAllocaRzSize for right redzone respectively.
3851 Value *AdditionalChunkSize = IRB.CreateAdd(
3852 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3853 PartialPadding);
3854
3855 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3856
3857 // Insert new alloca with new NewSize and Alignment params.
3858 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3859 NewAlloca->setAlignment(Alignment);
3860
3861 // NewAddress = Address + Alignment
3862 Value *NewAddress =
3863 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3864 ConstantInt::get(IntptrTy, Alignment.value()));
3865
3866 // Insert __asan_alloca_poison call for new created alloca.
3867 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3868
3869 // Store the last alloca's address to DynamicAllocaLayout. We'll need this
3870 // for unpoisoning stuff.
3871 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3872
3873 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3874
3875 // Remove lifetime markers now that this is no longer an alloca.
3876 for (User *U : make_early_inc_range(AI->users())) {
3877 auto *I = cast<Instruction>(U);
3878 if (I->isLifetimeStartOrEnd())
3879 I->eraseFromParent();
3880 }
3881
3882 // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
3883 AI->replaceAllUsesWith(NewAddressPtr);
3884
3885 // We are done. Erase old alloca from parent.
3886 AI->eraseFromParent();
3887}
3888
3889// isSafeAccess returns true if Addr is always inbounds with respect to its
3890// base object. For example, it is a field access or an array access with
3891// constant inbounds index.
3892bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3893 Value *Addr, TypeSize TypeStoreSize) const {
3894 if (TypeStoreSize.isScalable())
3895 // TODO: We can use vscale_range to convert a scalable value to an
3896 // upper bound on the access size.
3897 return false;
3898
3899 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3900 if (!SizeOffset.bothKnown())
3901 return false;
3902
3903 uint64_t Size = SizeOffset.Size.getZExtValue();
3904 int64_t Offset = SizeOffset.Offset.getSExtValue();
3905
3906 // Three checks are required to ensure safety:
3907 // . Offset >= 0 (since the offset is given from the base ptr)
3908 // . Size >= Offset (unsigned)
3909 // . Size - Offset >= NeededSize (unsigned)
3910 return Offset >= 0 && Size >= uint64_t(Offset) &&
3911 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3912}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< int > ClShadowAddrSpace("asan-shadow-addr-space", cl::desc("Address space for pointers to the shadow map"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static cl::list< unsigned > ClAddrSpaces("asan-instrument-address-spaces", cl::desc("Only instrument variables in the specified address spaces."), cl::Hidden, cl::CommaSeparated, cl::ZeroOrMore, cl::callback([](const unsigned &AddrSpace) { SrcAddrSpaces.insert(AddrSpace);}))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static bool isSupportedAddrspace(const Triple &TargetTriple, Value *Addr)
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static SmallSet< unsigned, 8 > SrcAddrSpaces
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCannotMerge()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition Comdat.h:38
ConstantArray - Constant Array Declarations.
Definition Constants.h:438
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition Function.h:860
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition Function.cpp:379
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:613
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:215
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:276
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
Definition Globals.cpp:568
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1837
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:546
LLVM_ABI Value * CreateAllocationSize(Type *DestTy, AllocaInst *AI)
Get allocation size of an alloca as a runtime Value* (handles both static and dynamic allocas and vsc...
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1871
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:686
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2223
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2336
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2171
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1516
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:561
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2025
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:566
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2312
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1944
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2473
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1812
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2308
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1423
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended from a 64-bit value.
Definition IRBuilder.h:532
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1854
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1554
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1867
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1406
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2166
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition IRBuilder.h:2641
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2487
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2249
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:599
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1890
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1576
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:551
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2181
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1440
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2787
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition MDBuilder.cpp:48
Metadata node.
Definition Metadata.h:1080
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
Tuple of metadata.
Definition Metadata.h:1500
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:413
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
EltTy front() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition Triple.h:948
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition Triple.h:622
bool isBPF() const
Tests whether the target is eBPF.
Definition Triple.h:1194
bool isOSNetBSD() const
Definition Triple.h:659
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:855
bool isABIN32() const
Definition Triple.h:1182
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition Triple.h:1078
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:419
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition Triple.h:1067
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition Triple.h:1073
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:708
@ UnknownObjectFormat
Definition Triple.h:326
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition Triple.h:953
bool isOSLinux() const
Tests whether the OS is Linux.
Definition Triple.h:757
bool isAMDGPU() const
Definition Triple.h:945
bool isMacOSX() const
Is this a Mac OS X triple.
Definition Triple.h:588
bool isOSFreeBSD() const
Definition Triple.h:667
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition Triple.h:778
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition Triple.h:607
bool isiOS() const
Is this an iOS triple.
Definition Triple.h:597
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition Triple.h:852
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition Triple.h:1166
bool isOSFuchsia() const
Definition Triple.h:671
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition Triple.h:698
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI bool isSwiftError() const
Return true if this value is a swifterror value.
Definition Value.cpp:1124
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrunmented, Returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
cb< typename detail::callback_traits< F >::result_type, typename detail::callback_traits< F >::arg_type > callback(F CB)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
Op::Description Desc
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
FunctionAddr VTableAddr uintptr_t uintptr_t Version
Definition InstrProf.h:302
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classifie...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
TargetTransformInfo TTI
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition Local.cpp:3895
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition Demangle.cpp:20
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1960
#define N
LLVM_ABI ASanAccessInfo(int32_t Packed)
const uint8_t AccessSizeIndex
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.