LLVM 20.0.0git
AddressSanitizer.cpp
1//===- AddressSanitizer.cpp - memory error detector -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address basic correctness
10// checker.
11// Details of the algorithm:
12// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
13//
14// FIXME: This sanitizer does not yet handle scalable vectors
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DepthFirstIterator.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/ADT/StringExtras.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/Analysis/GlobalsModRef.h"
29#include "llvm/Analysis/MemoryBuiltins.h"
30#include "llvm/Analysis/StackSafetyAnalysis.h"
31#include "llvm/Analysis/TargetLibraryInfo.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/BinaryFormat/MachO.h"
34#include "llvm/Demangle/Demangle.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Comdat.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DIBuilder.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugInfoMetadata.h"
44#include "llvm/IR/DebugLoc.h"
45#include "llvm/IR/DerivedTypes.h"
46#include "llvm/IR/EHPersonalities.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/GlobalAlias.h"
49#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/GlobalVariable.h"
51#include "llvm/IR/IRBuilder.h"
52#include "llvm/IR/InlineAsm.h"
53#include "llvm/IR/InstVisitor.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
56#include "llvm/IR/Instructions.h"
57#include "llvm/IR/IntrinsicInst.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/MDBuilder.h"
61#include "llvm/IR/Metadata.h"
62#include "llvm/IR/Module.h"
63#include "llvm/IR/Type.h"
64#include "llvm/IR/Use.h"
65#include "llvm/IR/Value.h"
66#include "llvm/MC/MCSectionMachO.h"
67#include "llvm/Support/Casting.h"
68#include "llvm/Support/CommandLine.h"
69#include "llvm/Support/Debug.h"
70#include "llvm/Support/ErrorHandling.h"
71#include "llvm/Support/MathExtras.h"
72#include "llvm/Support/raw_ostream.h"
73#include "llvm/TargetParser/Triple.h"
74#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
75#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
76#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
77#include "llvm/Transforms/Utils/BasicBlockUtils.h"
78#include "llvm/Transforms/Utils/Instrumentation.h"
79#include "llvm/Transforms/Utils/Local.h"
80#include "llvm/Transforms/Utils/ModuleUtils.h"
81#include "llvm/Transforms/Utils/PromoteMemToReg.h"
82#include <algorithm>
83#include <cassert>
84#include <cstddef>
85#include <cstdint>
86#include <iomanip>
87#include <limits>
88#include <sstream>
89#include <string>
90#include <tuple>
91
92using namespace llvm;
93
94#define DEBUG_TYPE "asan"
95
96static const uint64_t kDefaultShadowScale = 3;
97static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
98static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
99static const uint64_t kDynamicShadowSentinel =
100 std::numeric_limits<uint64_t>::max();
101static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
102static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
103static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
104static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
105static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
106static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
107static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
108static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
109static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
110static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
111static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
112static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
113static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
114static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
115static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
116static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
117static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
118static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
119static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
120static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
121static const uint64_t kEmscriptenShadowOffset = 0;
122
123// The shadow memory space is dynamically allocated.
124static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
125
126static const size_t kMinStackMallocSize = 1 << 6; // 64B
127static const size_t kMaxStackMallocSize = 1 << 16; // 64K
128static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
129static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
130
131const char kAsanModuleCtorName[] = "asan.module_ctor";
132const char kAsanModuleDtorName[] = "asan.module_dtor";
133static const uint64_t kAsanCtorAndDtorPriority = 1;
134// On Emscripten, the system needs more than one priority for constructors.
135static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
136const char kAsanReportErrorTemplate[] = "__asan_report_";
137const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
138const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
139const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
141 "__asan_unregister_image_globals";
142const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
143const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
144const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
145const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
146const char kAsanInitName[] = "__asan_init";
147const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
148const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
149const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
150const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
151static const int kMaxAsanStackMallocSizeClass = 10;
152const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
154 "__asan_stack_malloc_always_";
155const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
156const char kAsanGenPrefix[] = "___asan_gen_";
157const char kODRGenPrefix[] = "__odr_asan_gen_";
158const char kSanCovGenPrefix[] = "__sancov_gen_";
159const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
160const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
161const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";
162
163// ASan version script has __asan_* wildcard. Triple underscore prevents a
164// linker (gold) warning about attempting to export a local symbol.
165const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";
166
168 "__asan_option_detect_stack_use_after_return";
169
171 "__asan_shadow_memory_dynamic_address";
172
173const char kAsanAllocaPoison[] = "__asan_alloca_poison";
174const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
175
176const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
177const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
178const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
179const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
180
181// Access sizes are powers of two: 1, 2, 4, 8, 16.
182static const size_t kNumberOfAccessSizes = 5;
183
184static const uint64_t kAllocaRzSize = 32;
185
186// ASanAccessInfo implementation constants.
187constexpr size_t kCompileKernelShift = 0;
188constexpr size_t kCompileKernelMask = 0x1;
189constexpr size_t kAccessSizeIndexShift = 1;
190constexpr size_t kAccessSizeIndexMask = 0xf;
191constexpr size_t kIsWriteShift = 5;
192constexpr size_t kIsWriteMask = 0x1;
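// Annotation (not in the original source): these shifts and masks pack an
// access description as Packed = (IsWrite << 5) | (AccessSizeIndex << 1) |
// CompileKernel. For example, a userspace 4-byte write (IsWrite = 1,
// AccessSizeIndex = 2, CompileKernel = 0) packs to (1 << 5) | (2 << 1) = 0x24.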
193
194// Command-line flags.
195
197 "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
198 cl::Hidden, cl::init(false));
199
201 "asan-recover",
202 cl::desc("Enable recovery mode (continue-after-error)."),
203 cl::Hidden, cl::init(false));
204
206 "asan-guard-against-version-mismatch",
207 cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
208 cl::init(true));
209
210// This flag may need to be replaced with -f[no-]asan-reads.
211static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
212 cl::desc("instrument read instructions"),
213 cl::Hidden, cl::init(true));
214
216 "asan-instrument-writes", cl::desc("instrument write instructions"),
217 cl::Hidden, cl::init(true));
218
219static cl::opt<bool>
220 ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
221 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
222 cl::Optional);
223
225 "asan-instrument-atomics",
226 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
227 cl::init(true));
228
229static cl::opt<bool>
230 ClInstrumentByval("asan-instrument-byval",
231 cl::desc("instrument byval call arguments"), cl::Hidden,
232 cl::init(true));
233
235 "asan-always-slow-path",
236 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
237 cl::init(false));
238
240 "asan-force-dynamic-shadow",
241 cl::desc("Load shadow address into a local variable for each function"),
242 cl::Hidden, cl::init(false));
243
244static cl::opt<bool>
245 ClWithIfunc("asan-with-ifunc",
246 cl::desc("Access dynamic shadow through an ifunc global on "
247 "platforms that support this"),
248 cl::Hidden, cl::init(true));
249
251 "asan-with-ifunc-suppress-remat",
252 cl::desc("Suppress rematerialization of dynamic shadow address by passing "
253 "it through inline asm in prologue."),
254 cl::Hidden, cl::init(true));
255
256// This flag limits the number of instructions to be instrumented
257// in any given BB. Normally, this should be set to unlimited (INT_MAX),
258// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
259// set it to 10000.
261 "asan-max-ins-per-bb", cl::init(10000),
262 cl::desc("maximal number of instructions to instrument in any given BB"),
263 cl::Hidden);
264
265// This flag may need to be replaced with -f[no]asan-stack.
266static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
267 cl::Hidden, cl::init(true));
269 "asan-max-inline-poisoning-size",
270 cl::desc(
271 "Inline shadow poisoning for blocks up to the given size in bytes."),
272 cl::Hidden, cl::init(64));
273
275 "asan-use-after-return",
276 cl::desc("Sets the mode of detection for stack-use-after-return."),
277 cl::values(
278 clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
279 "Never detect stack use after return."),
280 clEnumValN(
281 AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
282 "Detect stack use after return if "
283 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284 clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
285 "Always detect stack use after return.")),
286 cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
287
288static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
289 cl::desc("Create redzones for byval "
290 "arguments (extra copy "
291 "required)"), cl::Hidden,
292 cl::init(true));
293
294static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
295 cl::desc("Check stack-use-after-scope"),
296 cl::Hidden, cl::init(false));
297
298// This flag may need to be replaced with -f[no]asan-globals.
299static cl::opt<bool> ClGlobals("asan-globals",
300 cl::desc("Handle global objects"), cl::Hidden,
301 cl::init(true));
302
303static cl::opt<bool> ClInitializers("asan-initialization-order",
304 cl::desc("Handle C++ initializer order"),
305 cl::Hidden, cl::init(true));
306
308 "asan-detect-invalid-pointer-pair",
309 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
310 cl::init(false));
311
313 "asan-detect-invalid-pointer-cmp",
314 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
315 cl::init(false));
316
318 "asan-detect-invalid-pointer-sub",
319 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
320 cl::init(false));
321
323 "asan-realign-stack",
324 cl::desc("Realign stack to the value of this flag (power of two)"),
325 cl::Hidden, cl::init(32));
326
328 "asan-instrumentation-with-call-threshold",
329 cl::desc("If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
332 cl::Hidden, cl::init(7000));
333
335 "asan-memory-access-callback-prefix",
336 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
337 cl::init("__asan_"));
338
340 "asan-kernel-mem-intrinsic-prefix",
341 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
342 cl::init(false));
343
344static cl::opt<bool>
345 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
346 cl::desc("instrument dynamic allocas"),
347 cl::Hidden, cl::init(true));
348
350 "asan-skip-promotable-allocas",
351 cl::desc("Do not instrument promotable allocas"), cl::Hidden,
352 cl::init(true));
353
355 "asan-constructor-kind",
356 cl::desc("Sets the ASan constructor kind"),
357 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
358 clEnumValN(AsanCtorKind::Global, "global",
359 "Use global constructors")),
360 cl::init(AsanCtorKind::Global), cl::Hidden);
361// These flags allow changing the shadow mapping.
362// The shadow mapping looks like
363// Shadow = (Mem >> scale) + offset
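// Worked example (annotation, not in the original source): with the default
// scale of 3 and the small x86-64 offset 0x7fff8000, an application address
// Mem = 0x10000 maps to shadow byte (0x10000 >> 3) + 0x7fff8000 = 0x7fffa000;
// each shadow byte describes one 8-byte granule of application memory.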
364
365static cl::opt<int> ClMappingScale("asan-mapping-scale",
366 cl::desc("scale of asan shadow mapping"),
367 cl::Hidden, cl::init(0));
368
369static cl::opt<uint64_t>
370 ClMappingOffset("asan-mapping-offset",
371 cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
372 cl::Hidden, cl::init(0));
373
374// Optimization flags. Not user visible, used mostly for testing
375// and benchmarking the tool.
376
377static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
378 cl::Hidden, cl::init(true));
379
380static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
381 cl::desc("Optimize callbacks"),
382 cl::Hidden, cl::init(false));
383
385 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
386 cl::Hidden, cl::init(true));
387
388static cl::opt<bool> ClOptGlobals("asan-opt-globals",
389 cl::desc("Don't instrument scalar globals"),
390 cl::Hidden, cl::init(true));
391
393 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
394 cl::Hidden, cl::init(false));
395
397 "asan-stack-dynamic-alloca",
398 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
399 cl::init(true));
400
402 "asan-force-experiment",
403 cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
404 cl::init(0));
405
406static cl::opt<bool>
407 ClUsePrivateAlias("asan-use-private-alias",
408 cl::desc("Use private aliases for global variables"),
409 cl::Hidden, cl::init(true));
410
411static cl::opt<bool>
412 ClUseOdrIndicator("asan-use-odr-indicator",
413 cl::desc("Use odr indicators to improve ODR reporting"),
414 cl::Hidden, cl::init(true));
415
416static cl::opt<bool>
417 ClUseGlobalsGC("asan-globals-live-support",
418 cl::desc("Use linker features to support dead "
419 "code stripping of globals"),
420 cl::Hidden, cl::init(true));
421
422// This is on by default even though there is a bug in gold:
423// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
424static cl::opt<bool>
425 ClWithComdat("asan-with-comdat",
426 cl::desc("Place ASan constructors in comdat sections"),
427 cl::Hidden, cl::init(true));
428
430 "asan-destructor-kind",
431 cl::desc("Sets the ASan destructor kind. The default is to use the value "
432 "provided to the pass constructor"),
433 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
434 clEnumValN(AsanDtorKind::Global, "global",
435 "Use global destructors")),
436 cl::init(AsanDtorKind::Invalid), cl::Hidden);
437
438// Debug flags.
439
440static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
441 cl::init(0));
442
443static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
444 cl::Hidden, cl::init(0));
445
447 cl::desc("Debug func"));
448
449static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
450 cl::Hidden, cl::init(-1));
451
452static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
453 cl::Hidden, cl::init(-1));
454
455STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
456STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
457STATISTIC(NumOptimizedAccessesToGlobalVar,
458 "Number of optimized accesses to global vars");
459STATISTIC(NumOptimizedAccessesToStackVar,
460 "Number of optimized accesses to stack vars");
461
462namespace {
463
464/// This struct defines the shadow mapping using the rule:
465/// shadow = (mem >> Scale) ADD-or-OR Offset.
466/// If InGlobal is true, then
467/// extern char __asan_shadow[];
468/// shadow = (mem >> Scale) + &__asan_shadow
469struct ShadowMapping {
470 int Scale;
471 uint64_t Offset;
472 bool OrShadowOffset;
473 bool InGlobal;
474};
475
476} // end anonymous namespace
477
478static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
479 bool IsKasan) {
480 bool IsAndroid = TargetTriple.isAndroid();
481 bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
482 TargetTriple.isDriverKit();
483 bool IsMacOS = TargetTriple.isMacOSX();
484 bool IsFreeBSD = TargetTriple.isOSFreeBSD();
485 bool IsNetBSD = TargetTriple.isOSNetBSD();
486 bool IsPS = TargetTriple.isPS();
487 bool IsLinux = TargetTriple.isOSLinux();
488 bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
489 TargetTriple.getArch() == Triple::ppc64le;
490 bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
491 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
492 bool IsMIPSN32ABI = TargetTriple.getEnvironment() == Triple::GNUABIN32;
493 bool IsMIPS32 = TargetTriple.isMIPS32();
494 bool IsMIPS64 = TargetTriple.isMIPS64();
495 bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
496 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
497 TargetTriple.getArch() == Triple::aarch64_be;
498 bool IsLoongArch64 = TargetTriple.isLoongArch64();
499 bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
500 bool IsWindows = TargetTriple.isOSWindows();
501 bool IsFuchsia = TargetTriple.isOSFuchsia();
502 bool IsEmscripten = TargetTriple.isOSEmscripten();
503 bool IsAMDGPU = TargetTriple.isAMDGPU();
504
505 ShadowMapping Mapping;
506
507 Mapping.Scale = kDefaultShadowScale;
508 if (ClMappingScale.getNumOccurrences() > 0) {
509 Mapping.Scale = ClMappingScale;
510 }
511
512 if (LongSize == 32) {
513 if (IsAndroid)
514 Mapping.Offset = kDynamicShadowSentinel;
515 else if (IsMIPSN32ABI)
516 Mapping.Offset = kMIPS_ShadowOffsetN32;
517 else if (IsMIPS32)
518 Mapping.Offset = kMIPS32_ShadowOffset32;
519 else if (IsFreeBSD)
520 Mapping.Offset = kFreeBSD_ShadowOffset32;
521 else if (IsNetBSD)
522 Mapping.Offset = kNetBSD_ShadowOffset32;
523 else if (IsIOS)
524 Mapping.Offset = kDynamicShadowSentinel;
525 else if (IsWindows)
526 Mapping.Offset = kWindowsShadowOffset32;
527 else if (IsEmscripten)
528 Mapping.Offset = kEmscriptenShadowOffset;
529 else
530 Mapping.Offset = kDefaultShadowOffset32;
531 } else { // LongSize == 64
532 // Fuchsia is always PIE, which means that the beginning of the address
533 // space is always available.
534 if (IsFuchsia)
535 Mapping.Offset = 0;
536 else if (IsPPC64)
537 Mapping.Offset = kPPC64_ShadowOffset64;
538 else if (IsSystemZ)
539 Mapping.Offset = kSystemZ_ShadowOffset64;
540 else if (IsFreeBSD && IsAArch64)
541 Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
542 else if (IsFreeBSD && !IsMIPS64) {
543 if (IsKasan)
544 Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
545 else
546 Mapping.Offset = kFreeBSD_ShadowOffset64;
547 } else if (IsNetBSD) {
548 if (IsKasan)
549 Mapping.Offset = kNetBSDKasan_ShadowOffset64;
550 else
551 Mapping.Offset = kNetBSD_ShadowOffset64;
552 } else if (IsPS)
553 Mapping.Offset = kPS_ShadowOffset64;
554 else if (IsLinux && IsX86_64) {
555 if (IsKasan)
556 Mapping.Offset = kLinuxKasan_ShadowOffset64;
557 else
558 Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
559 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
560 } else if (IsWindows && IsX86_64) {
561 Mapping.Offset = kWindowsShadowOffset64;
562 } else if (IsMIPS64)
563 Mapping.Offset = kMIPS64_ShadowOffset64;
564 else if (IsIOS)
565 Mapping.Offset = kDynamicShadowSentinel;
566 else if (IsMacOS && IsAArch64)
567 Mapping.Offset = kDynamicShadowSentinel;
568 else if (IsAArch64)
569 Mapping.Offset = kAArch64_ShadowOffset64;
570 else if (IsLoongArch64)
571 Mapping.Offset = kLoongArch64_ShadowOffset64;
572 else if (IsRISCV64)
573 Mapping.Offset = kRISCV64_ShadowOffset64;
574 else if (IsAMDGPU)
575 Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
576 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
577 else
578 Mapping.Offset = kDefaultShadowOffset64;
579 }
580
581 if (ClForceDynamicShadow) {
582 Mapping.Offset = kDynamicShadowSentinel;
583 }
584
585 if (ClMappingOffset.getNumOccurrences() > 0) {
586 Mapping.Offset = ClMappingOffset;
587 }
588
589 // OR-ing shadow offset is more efficient (at least on x86) if the offset
590 // is a power of two, but on ppc64 and loongarch64 we have to use add since
591 // the shadow offset is not necessarily 1/8-th of the address space. On
592 // SystemZ, we could OR the constant in a single instruction, but it's more
593 // efficient to load it once and use indexed addressing.
594 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
595 !IsRISCV64 && !IsLoongArch64 &&
596 !(Mapping.Offset & (Mapping.Offset - 1)) &&
597 Mapping.Offset != kDynamicShadowSentinel;
598 bool IsAndroidWithIfuncSupport =
599 IsAndroid && !TargetTriple.isAndroidVersionLT(21);
600 Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
601
602 return Mapping;
603}
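// Annotation (not in the original source): for the Linux/x86-64 userspace
// case above, with Mapping.Scale == 3 the offset expression evaluates to
// 0x7FFFFFFF & (~0xFFFULL << 3) == 0x7fff8000, the usual small x86-64 shadow
// base; since 0x7fff8000 is not a power of two, OrShadowOffset stays false
// and the mapping uses ADD rather than OR.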
604
605namespace llvm {
606void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
607 bool IsKasan, uint64_t *ShadowBase,
608 int *MappingScale, bool *OrShadowOffset) {
609 auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
610 *ShadowBase = Mapping.Offset;
611 *MappingScale = Mapping.Scale;
612 *OrShadowOffset = Mapping.OrShadowOffset;
613}
614
615ASanAccessInfo::ASanAccessInfo(int32_t Packed)
616 : Packed(Packed),
617 AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
618 IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
619 CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}
620
621ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
622 uint8_t AccessSizeIndex)
623 : Packed((IsWrite << kIsWriteShift) +
624 (CompileKernel << kCompileKernelShift) +
625 (AccessSizeIndex << kAccessSizeIndexShift)),
626 AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
627 CompileKernel(CompileKernel) {}
628
629} // namespace llvm
630
631static uint64_t getRedzoneSizeForScale(int MappingScale) {
632 // Redzone used for stack and globals is at least 32 bytes.
633 // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
634 return std::max(32U, 1U << MappingScale);
635}
636
637static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
638 if (TargetTriple.isOSEmscripten()) {
639 return kAsanEmscriptenCtorAndDtorPriority;
640 } else {
641 return kAsanCtorAndDtorPriority;
642 }
643}
644
645static Twine genName(StringRef suffix) {
646 return Twine(kAsanGenPrefix) + suffix;
647}
648
649namespace {
650/// Helper RAII class to post-process inserted asan runtime calls during a
651/// pass on a single Function. Upon end of scope, detects and applies the
652/// required funclet OpBundle.
653class RuntimeCallInserter {
654 Function *OwnerFn = nullptr;
655 bool TrackInsertedCalls = false;
656 SmallVector<CallInst *> InsertedCalls;
657
658public:
659 RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
660 if (Fn.hasPersonalityFn()) {
661 auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
662 if (isScopedEHPersonality(Personality))
663 TrackInsertedCalls = true;
664 }
665 }
666
667 ~RuntimeCallInserter() {
668 if (InsertedCalls.empty())
669 return;
670 assert(TrackInsertedCalls && "Calls were wrongly tracked");
671
672 DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
673 for (CallInst *CI : InsertedCalls) {
674 BasicBlock *BB = CI->getParent();
675 assert(BB && "Instruction doesn't belong to a BasicBlock");
676 assert(BB->getParent() == OwnerFn &&
677 "Instruction doesn't belong to the expected Function!");
678
679 ColorVector &Colors = BlockColors[BB];
680 // funclet opbundles are only valid in monochromatic BBs.
681 // Note that unreachable BBs are seen as colorless by colorEHFunclets()
682 // and will be DCE'ed later.
683 if (Colors.empty())
684 continue;
685 if (Colors.size() != 1) {
686 OwnerFn->getContext().emitError(
687 "Instruction's BasicBlock is not monochromatic");
688 continue;
689 }
690
691 BasicBlock *Color = Colors.front();
692 Instruction *EHPad = Color->getFirstNonPHI();
693
694 if (EHPad && EHPad->isEHPad()) {
695 // Replace CI with a clone with an added funclet OperandBundle
696 OperandBundleDef OB("funclet", EHPad);
697 auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
698 OB, CI->getIterator());
699 NewCall->copyMetadata(*CI);
700 CI->replaceAllUsesWith(NewCall);
701 CI->eraseFromParent();
702 }
703 }
704 }
705
706 CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
707 ArrayRef<Value *> Args = {},
708 const Twine &Name = "") {
709 assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
710
711 CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
712 if (TrackInsertedCalls)
713 InsertedCalls.push_back(Inst);
714 return Inst;
715 }
716};
717
718/// AddressSanitizer: instrument the code in module to find memory bugs.
719struct AddressSanitizer {
720 AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
721 int InstrumentationWithCallsThreshold,
722 uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
723 bool Recover = false, bool UseAfterScope = false,
724 AsanDetectStackUseAfterReturnMode UseAfterReturn =
725 AsanDetectStackUseAfterReturnMode::Runtime)
726 : M(M),
727 CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
728 : CompileKernel),
729 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
730 UseAfterScope(UseAfterScope || ClUseAfterScope),
731 UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
732 : UseAfterReturn),
733 SSGI(SSGI),
734 InstrumentationWithCallsThreshold(
735 ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
736 ? ClInstrumentationWithCallsThreshold
737 : InstrumentationWithCallsThreshold),
738 MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
739 ? ClMaxInlinePoisoningSize
740 : MaxInlinePoisoningSize) {
741 C = &(M.getContext());
742 DL = &M.getDataLayout();
743 LongSize = M.getDataLayout().getPointerSizeInBits();
744 IntptrTy = Type::getIntNTy(*C, LongSize);
745 PtrTy = PointerType::getUnqual(*C);
746 Int32Ty = Type::getInt32Ty(*C);
747 TargetTriple = Triple(M.getTargetTriple());
748
749 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
750
751 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
752 }
753
754 TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
755 return *AI.getAllocationSize(AI.getDataLayout());
756 }
757
758 /// Check if we want (and can) handle this alloca.
759 bool isInterestingAlloca(const AllocaInst &AI);
760
761 bool ignoreAccess(Instruction *Inst, Value *Ptr);
762 void getInterestingMemoryOperands(
763 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
764
765 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
766 InterestingMemoryOperand &O, bool UseCalls,
767 const DataLayout &DL, RuntimeCallInserter &RTCI);
768 void instrumentPointerComparisonOrSubtraction(Instruction *I,
769 RuntimeCallInserter &RTCI);
770 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
771 Value *Addr, MaybeAlign Alignment,
772 uint32_t TypeStoreSize, bool IsWrite,
773 Value *SizeArgument, bool UseCalls, uint32_t Exp,
774 RuntimeCallInserter &RTCI);
775 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
776 Instruction *InsertBefore, Value *Addr,
777 uint32_t TypeStoreSize, bool IsWrite,
778 Value *SizeArgument);
779 Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
780 bool Recover);
781 void instrumentUnusualSizeOrAlignment(Instruction *I,
782 Instruction *InsertBefore, Value *Addr,
783 TypeSize TypeStoreSize, bool IsWrite,
784 Value *SizeArgument, bool UseCalls,
785 uint32_t Exp,
786 RuntimeCallInserter &RTCI);
787 void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
788 Type *IntptrTy, Value *Mask, Value *EVL,
789 Value *Stride, Instruction *I, Value *Addr,
790 MaybeAlign Alignment, unsigned Granularity,
791 Type *OpType, bool IsWrite,
792 Value *SizeArgument, bool UseCalls,
793 uint32_t Exp, RuntimeCallInserter &RTCI);
794 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
795 Value *ShadowValue, uint32_t TypeStoreSize);
796 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
797 bool IsWrite, size_t AccessSizeIndex,
798 Value *SizeArgument, uint32_t Exp,
799 RuntimeCallInserter &RTCI);
800 void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
801 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
802 bool suppressInstrumentationSiteForDebug(int &Instrumented);
803 bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
804 bool maybeInsertAsanInitAtFunctionEntry(Function &F);
805 bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
806 void markEscapedLocalAllocas(Function &F);
807
808private:
809 friend struct FunctionStackPoisoner;
810
811 void initializeCallbacks(const TargetLibraryInfo *TLI);
812
813 bool LooksLikeCodeInBug11395(Instruction *I);
814 bool GlobalIsLinkerInitialized(GlobalVariable *G);
815 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
816 TypeSize TypeStoreSize) const;
817
818 /// Helper to cleanup per-function state.
819 struct FunctionStateRAII {
820 AddressSanitizer *Pass;
821
822 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
823 assert(Pass->ProcessedAllocas.empty() &&
824 "last pass forgot to clear cache");
825 assert(!Pass->LocalDynamicShadow);
826 }
827
828 ~FunctionStateRAII() {
829 Pass->LocalDynamicShadow = nullptr;
830 Pass->ProcessedAllocas.clear();
831 }
832 };
833
834 Module &M;
835 LLVMContext *C;
836 const DataLayout *DL;
837 Triple TargetTriple;
838 int LongSize;
839 bool CompileKernel;
840 bool Recover;
841 bool UseAfterScope;
842 AsanDetectStackUseAfterReturnMode UseAfterReturn;
843 Type *IntptrTy;
844 Type *Int32Ty;
845 PointerType *PtrTy;
846 ShadowMapping Mapping;
847 FunctionCallee AsanHandleNoReturnFunc;
848 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
849 Constant *AsanShadowGlobal;
850
851 // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
852 FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
853 FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
854
855 // These arrays are indexed by AccessIsWrite and Experiment.
856 FunctionCallee AsanErrorCallbackSized[2][2];
857 FunctionCallee AsanMemoryAccessCallbackSized[2][2];
858
859 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
860 Value *LocalDynamicShadow = nullptr;
861 const StackSafetyGlobalInfo *SSGI;
862 DenseMap<const AllocaInst *, bool> ProcessedAllocas;
863
864 FunctionCallee AMDGPUAddressShared;
865 FunctionCallee AMDGPUAddressPrivate;
866 int InstrumentationWithCallsThreshold;
867 uint32_t MaxInlinePoisoningSize;
868};
869
870class ModuleAddressSanitizer {
871public:
872 ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
873 bool CompileKernel = false, bool Recover = false,
874 bool UseGlobalsGC = true, bool UseOdrIndicator = true,
875 AsanDtorKind DestructorKind = AsanDtorKind::Global,
876 AsanCtorKind ConstructorKind = AsanCtorKind::Global)
877 : M(M),
878 CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
879 : CompileKernel),
880 InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
881 ? ClInsertVersionCheck
882 : InsertVersionCheck),
883 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
884 UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
885 // Enable aliases as they should have no downside with ODR indicators.
886 UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
887 ? ClUsePrivateAlias
888 : UseOdrIndicator),
889 UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
890 ? ClUseOdrIndicator
891 : UseOdrIndicator),
892 // Not a typo: ClWithComdat is almost completely pointless without
893 // ClUseGlobalsGC (because then it only works on modules without
894 // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
895 // and both suffer from gold PR19002 for which UseGlobalsGC constructor
896 // argument is designed as workaround. Therefore, disable both
897 // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
898 // do globals-gc.
899 UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
900 DestructorKind(DestructorKind),
901 ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
902 ? ClConstructorKind
903 : ConstructorKind) {
904 C = &(M.getContext());
905 int LongSize = M.getDataLayout().getPointerSizeInBits();
906 IntptrTy = Type::getIntNTy(*C, LongSize);
907 PtrTy = PointerType::getUnqual(*C);
908 TargetTriple = Triple(M.getTargetTriple());
909 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
910
911 if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
912 this->DestructorKind = ClOverrideDestructorKind;
913 assert(this->DestructorKind != AsanDtorKind::Invalid);
914 }
915
916 bool instrumentModule();
917
918private:
919 void initializeCallbacks();
920
921 void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
922 void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
923 ArrayRef<GlobalVariable *> ExtendedGlobals,
924 ArrayRef<Constant *> MetadataInitializers);
925 void instrumentGlobalsELF(IRBuilder<> &IRB,
926 ArrayRef<GlobalVariable *> ExtendedGlobals,
927 ArrayRef<Constant *> MetadataInitializers,
928 const std::string &UniqueModuleId);
929 void InstrumentGlobalsMachO(IRBuilder<> &IRB,
930 ArrayRef<GlobalVariable *> ExtendedGlobals,
931 ArrayRef<Constant *> MetadataInitializers);
932 void
933 InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
934 ArrayRef<GlobalVariable *> ExtendedGlobals,
935 ArrayRef<Constant *> MetadataInitializers);
936
937 GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
938 StringRef OriginalName);
939 void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
940 StringRef InternalSuffix);
941 Instruction *CreateAsanModuleDtor();
942
943 const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
944 bool shouldInstrumentGlobal(GlobalVariable *G) const;
945 bool ShouldUseMachOGlobalsSection() const;
946 StringRef getGlobalMetadataSection() const;
947 void poisonOneInitializer(Function &GlobalInit);
948 void createInitializerPoisonCalls();
949 uint64_t getMinRedzoneSizeForGlobal() const {
950 return getRedzoneSizeForScale(Mapping.Scale);
951 }
952 uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
953 int GetAsanVersion() const;
954 GlobalVariable *getOrCreateModuleName();
955
956 Module &M;
957 bool CompileKernel;
958 bool InsertVersionCheck;
959 bool Recover;
960 bool UseGlobalsGC;
961 bool UsePrivateAlias;
962 bool UseOdrIndicator;
963 bool UseCtorComdat;
964 AsanDtorKind DestructorKind;
965 AsanCtorKind ConstructorKind;
966 Type *IntptrTy;
967 PointerType *PtrTy;
968 LLVMContext *C;
969 Triple TargetTriple;
970 ShadowMapping Mapping;
971 FunctionCallee AsanPoisonGlobals;
972 FunctionCallee AsanUnpoisonGlobals;
973 FunctionCallee AsanRegisterGlobals;
974 FunctionCallee AsanUnregisterGlobals;
975 FunctionCallee AsanRegisterImageGlobals;
976 FunctionCallee AsanUnregisterImageGlobals;
977 FunctionCallee AsanRegisterElfGlobals;
978 FunctionCallee AsanUnregisterElfGlobals;
979
980 Function *AsanCtorFunction = nullptr;
981 Function *AsanDtorFunction = nullptr;
982 GlobalVariable *ModuleName = nullptr;
983};
984
985// Stack poisoning does not play well with exception handling.
986// When an exception is thrown, we essentially bypass the code
987// that unpoisons the stack. This is why the run-time library has
988// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
989// stack in the interceptor. This however does not work inside the
990// actual function which catches the exception. Most likely because the
991// compiler hoists the load of the shadow value somewhere too high.
992// This causes asan to report a non-existing bug on 453.povray.
993// It sounds like an LLVM bug.
994struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
995 Function &F;
996 AddressSanitizer &ASan;
997 RuntimeCallInserter &RTCI;
998 DIBuilder DIB;
999 LLVMContext *C;
1000 Type *IntptrTy;
1001 Type *IntptrPtrTy;
1002 ShadowMapping Mapping;
1003
1004 SmallVector<AllocaInst *, 16> AllocaVec;
1005 SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
1006 SmallVector<Instruction *, 8> RetVec;
1007
1008 FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
1009 AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
1010 FunctionCallee AsanSetShadowFunc[0x100] = {};
1011 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1012 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1013
1014 // Stores a place and arguments of poisoning/unpoisoning call for alloca.
1015 struct AllocaPoisonCall {
1016 IntrinsicInst *InsBefore;
1017 AllocaInst *AI;
1018 uint64_t Size;
1019 bool DoPoison;
1020 };
1021 SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
1022 SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
1023 bool HasUntracedLifetimeIntrinsic = false;
1024
1025 SmallVector<AllocaInst *, 1> DynamicAllocaVec;
1026 SmallVector<IntrinsicInst *, 1> StackRestoreVec;
1027 AllocaInst *DynamicAllocaLayout = nullptr;
1028 IntrinsicInst *LocalEscapeCall = nullptr;
1029
1030 bool HasInlineAsm = false;
1031 bool HasReturnsTwiceCall = false;
1032 bool PoisonStack;
1033
1034 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1035 RuntimeCallInserter &RTCI)
1036 : F(F), ASan(ASan), RTCI(RTCI),
1037 DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
1038 IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
1039 Mapping(ASan.Mapping),
1040 PoisonStack(ClStack &&
1041 !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}
1042
1043 bool runOnFunction() {
1044 if (!PoisonStack)
1045 return false;
1046
1047 if (ClRedzoneByvalArgs)
1048 copyArgsPassedByValToAllocas();
1049
1050 // Collect alloca, ret, lifetime instructions etc.
1051 for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
1052
1053 if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
1054
1055 initializeCallbacks(*F.getParent());
1056
1057 if (HasUntracedLifetimeIntrinsic) {
1058 // If there are lifetime intrinsics which couldn't be traced back to an
1059 // alloca, we may not know exactly when a variable enters scope, and
1060 // therefore should "fail safe" by not poisoning them.
1061 StaticAllocaPoisonCallVec.clear();
1062 DynamicAllocaPoisonCallVec.clear();
1063 }
1064
1065 processDynamicAllocas();
1066 processStaticAllocas();
1067
1068 if (ClDebugStack) {
1069 LLVM_DEBUG(dbgs() << F);
1070 }
1071 return true;
1072 }
1073
1074 // Arguments marked with the "byval" attribute are implicitly copied without
1075 // using an alloca instruction. To produce redzones for those arguments, we
1076 // copy them a second time into memory allocated with an alloca instruction.
1077 void copyArgsPassedByValToAllocas();
1078
1079 // Finds all Alloca instructions and puts
1080 // poisoned red zones around all of them.
1081 // Then unpoison everything back before the function returns.
1082 void processStaticAllocas();
1083 void processDynamicAllocas();
1084
1085 void createDynamicAllocasInitStorage();
1086
1087 // ----------------------- Visitors.
1088 /// Collect all Ret instructions, or the musttail call instruction if it
1089 /// precedes the return instruction.
1090 void visitReturnInst(ReturnInst &RI) {
1091 if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
1092 RetVec.push_back(CI);
1093 else
1094 RetVec.push_back(&RI);
1095 }
1096
1097 /// Collect all Resume instructions.
1098 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1099
1100 /// Collect all CleanupReturnInst instructions.
1101 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1102
1103 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1104 Value *SavedStack) {
1105 IRBuilder<> IRB(InstBefore);
1106 Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
1107 // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
1108 // need to adjust extracted SP to compute the address of the most recent
1109 // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
1110 // this purpose.
1111 if (!isa<ReturnInst>(InstBefore)) {
1112 Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
1113 InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
1114 {IntptrTy});
1115
1116 Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});
1117
1118 DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
1119 DynamicAreaOffset);
1120 }
1121
1122 RTCI.createRuntimeCall(
1123 IRB, AsanAllocasUnpoisonFunc,
1124 {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1125 }
1126
1127 // Unpoison dynamic allocas redzones.
1128 void unpoisonDynamicAllocas() {
1129 for (Instruction *Ret : RetVec)
1130 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1131
1132 for (Instruction *StackRestoreInst : StackRestoreVec)
1133 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1134 StackRestoreInst->getOperand(0));
1135 }
1136
1137 // Deploy and poison redzones around dynamic alloca call. To do this, we
1138 // should replace this call with another one with changed parameters and
1139 // replace all its uses with new address, so
1140 // addr = alloca type, old_size, align
1141 // is replaced by
1142 // new_size = (old_size + additional_size) * sizeof(type)
1143 // tmp = alloca i8, new_size, max(align, 32)
1144 // addr = tmp + 32 (first 32 bytes are for the left redzone).
1145 // Additional_size is added to make new memory allocation contain not only
1146 // requested memory, but also left, partial and right redzones.
1147 void handleDynamicAllocaCall(AllocaInst *AI);
1148
1149 /// Collect Alloca instructions we want (and can) handle.
1150 void visitAllocaInst(AllocaInst &AI) {
1151 // FIXME: Handle scalable vectors instead of ignoring them.
1152 const Type *AllocaType = AI.getAllocatedType();
1153 const auto *STy = dyn_cast<StructType>(AllocaType);
1154 if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1155 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1156 if (AI.isStaticAlloca()) {
1157 // Skip over allocas that are present *before* the first instrumented
1158 // alloca, we don't want to move those around.
1159 if (AllocaVec.empty())
1160 return;
1161
1162 StaticAllocasToMoveUp.push_back(&AI);
1163 }
1164 return;
1165 }
1166
1167 if (!AI.isStaticAlloca())
1168 DynamicAllocaVec.push_back(&AI);
1169 else
1170 AllocaVec.push_back(&AI);
1171 }
1172
1173 /// Collect lifetime intrinsic calls to check for use-after-scope
1174 /// errors.
1175 void visitIntrinsicInst(IntrinsicInst &II) {
1176 Intrinsic::ID ID = II.getIntrinsicID();
1177 if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
1178 if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
1179 if (!ASan.UseAfterScope)
1180 return;
1181 if (!II.isLifetimeStartOrEnd())
1182 return;
1183 // Found lifetime intrinsic, add ASan instrumentation if necessary.
1184 auto *Size = cast<ConstantInt>(II.getArgOperand(0));
1185 // If size argument is undefined, don't do anything.
1186 if (Size->isMinusOne()) return;
1187 // Check that size doesn't saturate uint64_t and can
1188 // be stored in IntptrTy.
1189 const uint64_t SizeValue = Size->getValue().getLimitedValue();
1190 if (SizeValue == ~0ULL ||
1191 !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
1192 return;
1193 // Find alloca instruction that corresponds to llvm.lifetime argument.
1194 // Currently we can only handle lifetime markers pointing to the
1195 // beginning of the alloca.
1196 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
1197 if (!AI) {
1198 HasUntracedLifetimeIntrinsic = true;
1199 return;
1200 }
1201 // We're interested only in allocas we can handle.
1202 if (!ASan.isInterestingAlloca(*AI))
1203 return;
1204 bool DoPoison = (ID == Intrinsic::lifetime_end);
1205 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
1206 if (AI->isStaticAlloca())
1207 StaticAllocaPoisonCallVec.push_back(APC);
1208 else if (ClInstrumentDynamicAllocas)
1209 DynamicAllocaPoisonCallVec.push_back(APC);
1210 }
1211
1212 void visitCallBase(CallBase &CB) {
1213 if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
1214 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1215 HasReturnsTwiceCall |= CI->canReturnTwice();
1216 }
1217 }
1218
1219 // ---------------------- Helpers.
1220 void initializeCallbacks(Module &M);
1221
1222 // Copies bytes from ShadowBytes into shadow memory for indexes where
1223 // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
1224 // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
1225 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1226 IRBuilder<> &IRB, Value *ShadowBase);
1227 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1228 size_t Begin, size_t End, IRBuilder<> &IRB,
1229 Value *ShadowBase);
1230 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1231 ArrayRef<uint8_t> ShadowBytes, size_t Begin,
1232 size_t End, IRBuilder<> &IRB, Value *ShadowBase);
1233
1234 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
1235
1236 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
1237 bool Dynamic);
1238 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
1239 Instruction *ThenTerm, Value *ValueIfFalse);
1240};
1241
1242} // end anonymous namespace
1243
1244void AddressSanitizerPass::printPipeline(
1245 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1246 static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
1247 OS, MapClassName2PassName);
1248 OS << '<';
1249 if (Options.CompileKernel)
1250 OS << "kernel";
1251 OS << '>';
1252}
1253
1254AddressSanitizerPass::AddressSanitizerPass(
1255 const AddressSanitizerOptions &Options, bool UseGlobalGC,
1256 bool UseOdrIndicator, AsanDtorKind DestructorKind,
1257 AsanCtorKind ConstructorKind)
1258 : Options(Options), UseGlobalGC(UseGlobalGC),
1259 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1260 ConstructorKind(ConstructorKind) {}
1261
1262PreservedAnalyses AddressSanitizerPass::run(Module &M,
1263 ModuleAnalysisManager &MAM) {
1264 // Return early if nosanitize_address module flag is present for the module.
1265 // This implies that asan pass has already run before.
1266 if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
1267 return PreservedAnalyses::all();
1268
1269 ModuleAddressSanitizer ModuleSanitizer(
1270 M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
1271 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1272 bool Modified = false;
1273 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1274 const StackSafetyGlobalInfo *const SSGI =
1275 ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
1276 for (Function &F : M) {
1277 AddressSanitizer FunctionSanitizer(
1278 M, SSGI, Options.InstrumentationWithCallsThreshold,
1279 Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
1280 Options.UseAfterScope, Options.UseAfterReturn);
1281 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1282 Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
1283 }
1284 Modified |= ModuleSanitizer.instrumentModule();
1285 if (!Modified)
1286 return PreservedAnalyses::all();
1287
1288 PreservedAnalyses PA = PreservedAnalyses::none();
1289 // GlobalsAA is considered stateless and does not get invalidated unless
1290 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
1291 // make changes that require GlobalsAA to be invalidated.
1292 PA.abandon<GlobalsAA>();
1293 return PA;
1294}
1295
1296static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
1297 size_t Res = llvm::countr_zero(TypeSize / 8);
1298 assert(Res < kNumberOfAccessSizes);
1299 return Res;
1300}
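// Annotation (not in the original source): a 32-bit (4-byte) access yields
// countr_zero(32 / 8) == 2, selecting the size-index-2 callbacks such as
// __asan_report_load4 / __asan_report_store4.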
1301
1302/// Check if \p G has been created by a trusted compiler pass.
1303static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
1304 // Do not instrument @llvm.global_ctors, @llvm.used, etc.
1305 if (G->getName().starts_with("llvm.") ||
1306 // Do not instrument gcov counter arrays.
1307 G->getName().starts_with("__llvm_gcov_ctr") ||
1308 // Do not instrument rtti proxy symbols for function sanitizer.
1309 G->getName().starts_with("__llvm_rtti_proxy"))
1310 return true;
1311
1312 // Do not instrument asan globals.
1313 if (G->getName().starts_with(kAsanGenPrefix) ||
1314 G->getName().starts_with(kSanCovGenPrefix) ||
1315 G->getName().starts_with(kODRGenPrefix))
1316 return true;
1317
1318 return false;
1319}
1320
1321static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
1322 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1323 unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
1324 if (AddrSpace == 3 || AddrSpace == 5)
1325 return true;
1326 return false;
1327}
1328
1329Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
1330 // Shadow >> scale
1331 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
1332 if (Mapping.Offset == 0) return Shadow;
1333 // (Shadow >> scale) | offset
1334 Value *ShadowBase;
1335 if (LocalDynamicShadow)
1336 ShadowBase = LocalDynamicShadow;
1337 else
1338 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1339 if (Mapping.OrShadowOffset)
1340 return IRB.CreateOr(Shadow, ShadowBase);
1341 else
1342 return IRB.CreateAdd(Shadow, ShadowBase);
1343}
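// Annotation (not in the original source): for the constant-offset case with
// Scale == 3, Offset == 0x7fff8000 and OrShadowOffset == false, this emits IR
// equivalent to:
//   %shadow.shifted = lshr i64 %addr, 3
//   %shadow.addr = add i64 %shadow.shifted, 2147450880 ; 0x7fff8000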
1344
1345// Instrument memset/memmove/memcpy
1346void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
1347 RuntimeCallInserter &RTCI) {
1348 InstrumentationIRBuilder IRB(MI);
1349 if (isa<MemTransferInst>(MI)) {
1350 RTCI.createRuntimeCall(
1351 IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
1352 {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
1353 IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
1354 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1355 } else if (isa<MemSetInst>(MI)) {
1356 RTCI.createRuntimeCall(
1357 IRB, AsanMemset,
1358 {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
1359 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1360 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1361 }
1362 MI->eraseFromParent();
1363}
1364
1365/// Check if we want (and can) handle this alloca.
1366bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1367 auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
1368
1369 if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
1370 return PreviouslySeenAllocaInfo->getSecond();
1371
1372 bool IsInteresting =
1373 (AI.getAllocatedType()->isSized() &&
1374 // alloca() may be called with 0 size, ignore it.
1375 ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
1376 // We are only interested in allocas not promotable to registers.
1377 // Promotable allocas are common under -O0.
1378 (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
1379 // inalloca allocas are not treated as static, and we don't want
1380 // dynamic alloca instrumentation for them as well.
1381 !AI.isUsedWithInAlloca() &&
1382 // swifterror allocas are register promoted by ISel
1383 !AI.isSwiftError() &&
1384 // safe allocas are not interesting
1385 !(SSGI && SSGI->isSafe(AI)));
1386
1387 ProcessedAllocas[&AI] = IsInteresting;
1388 return IsInteresting;
1389}
1390
1391bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
1392 // Instrument accesses from different address spaces only for AMDGPU.
1393 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1394 if (PtrTy->getPointerAddressSpace() != 0 &&
1395 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
1396 return true;
1397
1398 // Ignore swifterror addresses.
1399 // swifterror memory addresses are mem2reg promoted by instruction
1400 // selection. As such they cannot have regular uses like an instrumentation
1401 // function and it makes no sense to track them as memory.
1402 if (Ptr->isSwiftError())
1403 return true;
1404
1405 // Treat memory accesses to promotable allocas as non-interesting since they
1406 // will not cause memory violations. This greatly speeds up the instrumented
1407 // executable at -O0.
1408 if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
1409 if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
1410 return true;
1411
1412 if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
1413 findAllocaForValue(Ptr))
1414 return true;
1415
1416 return false;
1417}
1418
1419void AddressSanitizer::getInterestingMemoryOperands(
1420 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
1421 // Do not instrument the load fetching the dynamic shadow address.
1422 if (LocalDynamicShadow == I)
1423 return;
1424
1425 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1426 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
1427 return;
1428 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
1429 LI->getType(), LI->getAlign());
1430 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1431 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
1432 return;
1433 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
1434 SI->getValueOperand()->getType(), SI->getAlign());
1435 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
1436 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
1437 return;
1438 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
1439 RMW->getValOperand()->getType(), std::nullopt);
1440 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
1441 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
1442 return;
1443 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
1444 XCHG->getCompareOperand()->getType(),
1445 std::nullopt);
1446 } else if (auto CI = dyn_cast<CallInst>(I)) {
1447 switch (CI->getIntrinsicID()) {
1448 case Intrinsic::masked_load:
1449 case Intrinsic::masked_store:
1450 case Intrinsic::masked_gather:
1451 case Intrinsic::masked_scatter: {
1452 bool IsWrite = CI->getType()->isVoidTy();
1453 // Masked store has an initial operand for the value.
1454 unsigned OpOffset = IsWrite ? 1 : 0;
1455 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1456 return;
1457
1458 auto BasePtr = CI->getOperand(OpOffset);
1459 if (ignoreAccess(I, BasePtr))
1460 return;
1461 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1462 MaybeAlign Alignment = Align(1);
1463 // Otherwise no alignment guarantees. We probably got Undef.
1464 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1465 Alignment = Op->getMaybeAlignValue();
1466 Value *Mask = CI->getOperand(2 + OpOffset);
1467 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
1468 break;
1469 }
1470 case Intrinsic::masked_expandload:
1471 case Intrinsic::masked_compressstore: {
1472 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1473 unsigned OpOffset = IsWrite ? 1 : 0;
1474 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1475 return;
1476 auto BasePtr = CI->getOperand(OpOffset);
1477 if (ignoreAccess(I, BasePtr))
1478 return;
1479 MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
1480 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1481
1482 IRBuilder IB(I);
1483 Value *Mask = CI->getOperand(1 + OpOffset);
1484 // Use the popcount of Mask as the effective vector length.
1485 Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
1486 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
1487 Value *EVL = IB.CreateAddReduce(ExtMask);
1488 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
1489 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1490 EVL);
1491 break;
1492 }
1493 case Intrinsic::vp_load:
1494 case Intrinsic::vp_store:
1495 case Intrinsic::experimental_vp_strided_load:
1496 case Intrinsic::experimental_vp_strided_store: {
1497 auto *VPI = cast<VPIntrinsic>(CI);
1498 unsigned IID = CI->getIntrinsicID();
1499 bool IsWrite = CI->getType()->isVoidTy();
1500 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1501 return;
1502 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1503 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1504 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
1505 Value *Stride = nullptr;
1506 if (IID == Intrinsic::experimental_vp_strided_store ||
1507 IID == Intrinsic::experimental_vp_strided_load) {
1508 Stride = VPI->getOperand(PtrOpNo + 1);
1509 // Use the pointer alignment as the element alignment if the stride is a
1510 // multiple of the pointer alignment. Otherwise, the element alignment
1511 // should be Align(1).
1512 unsigned PointerAlign = Alignment.valueOrOne().value();
1513 if (!isa<ConstantInt>(Stride) ||
1514 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1515 Alignment = Align(1);
1516 }
1517 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1518 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1519 Stride);
1520 break;
1521 }
1522 case Intrinsic::vp_gather:
1523 case Intrinsic::vp_scatter: {
1524 auto *VPI = cast<VPIntrinsic>(CI);
1525 unsigned IID = CI->getIntrinsicID();
1526 bool IsWrite = IID == Intrinsic::vp_scatter;
1527 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1528 return;
1529 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1530 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1531 MaybeAlign Alignment = VPI->getPointerAlignment();
1532 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1533 VPI->getMaskParam(),
1534 VPI->getVectorLengthParam());
1535 break;
1536 }
1537 default:
1538 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1539 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
1540 ignoreAccess(I, CI->getArgOperand(ArgNo)))
1541 continue;
1542 Type *Ty = CI->getParamByValType(ArgNo);
1543 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
1544 }
1545 }
1546 }
1547}
1548
1549static bool isPointerOperand(Value *V) {
1550 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1551}
1552
1553// This is a rough heuristic; it may cause both false positives and
1554// false negatives. The proper implementation requires cooperation with
1555 // the frontend.
1556 static bool isInterestingPointerComparison(Instruction *I) {
1557 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1558 if (!Cmp->isRelational())
1559 return false;
1560 } else {
1561 return false;
1562 }
1563 return isPointerOperand(I->getOperand(0)) &&
1564 isPointerOperand(I->getOperand(1));
1565}
1566
1567// This is a rough heuristic; it may cause both false positives and
1568// false negatives. The proper implementation requires cooperation with
1569 // the frontend.
1570 static bool isInterestingPointerSubtraction(Instruction *I) {
1571 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1572 if (BO->getOpcode() != Instruction::Sub)
1573 return false;
1574 } else {
1575 return false;
1576 }
1577 return isPointerOperand(I->getOperand(0)) &&
1578 isPointerOperand(I->getOperand(1));
1579}
1580
1581bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1582 // If a global variable does not have dynamic initialization we don't
1583 // have to instrument it. However, if a global does not have an initializer
1584 // at all, we assume it has a dynamic initializer (in another TU).
1585 if (!G->hasInitializer())
1586 return false;
1587
1588 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1589 return false;
1590
1591 return true;
1592}
1593
1594void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1595 Instruction *I, RuntimeCallInserter &RTCI) {
1596 IRBuilder<> IRB(I);
1597 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1598 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1599 for (Value *&i : Param) {
1600 if (i->getType()->isPointerTy())
1601 i = IRB.CreatePointerCast(i, IntptrTy);
1602 }
1603 RTCI.createRuntimeCall(IRB, F, Param);
1604}
1605
1606static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1607 Instruction *InsertBefore, Value *Addr,
1608 MaybeAlign Alignment, unsigned Granularity,
1609 TypeSize TypeStoreSize, bool IsWrite,
1610 Value *SizeArgument, bool UseCalls,
1611 uint32_t Exp, RuntimeCallInserter &RTCI) {
1612 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1613 // if the data is properly aligned.
1614 if (!TypeStoreSize.isScalable()) {
1615 const auto FixedSize = TypeStoreSize.getFixedValue();
1616 switch (FixedSize) {
1617 case 8:
1618 case 16:
1619 case 32:
1620 case 64:
1621 case 128:
1622 if (!Alignment || *Alignment >= Granularity ||
1623 *Alignment >= FixedSize / 8)
1624 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1625 FixedSize, IsWrite, nullptr, UseCalls,
1626 Exp, RTCI);
1627 }
1628 }
1629 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1630 IsWrite, nullptr, UseCalls, Exp, RTCI);
1631}
1632
1633void AddressSanitizer::instrumentMaskedLoadOrStore(
1634 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1635 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1636 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1637 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1638 RuntimeCallInserter &RTCI) {
1639 auto *VTy = cast<VectorType>(OpType);
1640 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1641 auto Zero = ConstantInt::get(IntptrTy, 0);
1642
1643 IRBuilder IB(I);
1644 Instruction *LoopInsertBefore = I;
1645 if (EVL) {
1646 // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1647 // greater than zero, so we must check whether EVL is zero here.
1648 Type *EVLType = EVL->getType();
1649 Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1650 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
1651 IB.SetInsertPoint(LoopInsertBefore);
1652 // Cast EVL to IntptrTy.
1653 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1654 // To avoid undefined behavior when extracting with an out-of-range index,
1655 // use the minimum of EVL and the element count as the trip count.
1656 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1657 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1658 } else {
1659 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1660 }
1661
1662 // Cast Stride to IntptrTy.
1663 if (Stride)
1664 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1665
1666 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore,
1667 [&](IRBuilderBase &IRB, Value *Index) {
1668 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1669 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1670 if (MaskElemC->isZero())
1671 // No check
1672 return;
1673 // Unconditional check
1674 } else {
1675 // Conditional check
1676 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1677 MaskElem, &*IRB.GetInsertPoint(), false);
1678 IRB.SetInsertPoint(ThenTerm);
1679 }
1680
1681 Value *InstrumentedAddress;
1682 if (isa<VectorType>(Addr->getType())) {
1683 assert(
1684 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1685 "Expected vector of pointer.");
1686 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1687 } else if (Stride) {
1688 Index = IRB.CreateMul(Index, Stride);
1689 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1690 } else {
1691 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1692 }
1693 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1694 Alignment, Granularity, ElemTypeSize, IsWrite,
1695 SizeArgument, UseCalls, Exp, RTCI);
1696 });
1697}
1698
1699void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1700 InterestingMemoryOperand &O, bool UseCalls,
1701 const DataLayout &DL,
1702 RuntimeCallInserter &RTCI) {
1703 Value *Addr = O.getPtr();
1704
1705 // Optimization experiments.
1706 // The experiments can be used to evaluate potential optimizations that remove
1707 // instrumentation (assess false negatives). Instead of completely removing
1708 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1709 // experiments that want to remove instrumentation of this instruction).
1710 // If Exp is non-zero, this pass will emit special calls into runtime
1711 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1712 // make the runtime terminate the program in a special way (with a different
1713 // exit status). Then you run the new compiler on a buggy corpus, collect
1714 // the special terminations (ideally, you don't see them at all -- no false
1715 // negatives) and make the decision on the optimization.
1716 uint32_t Exp = ClForceExperiment;
1717
1718 if (ClOpt && ClOptGlobals) {
1719 // If initialization order checking is disabled, a simple access to a
1720 // dynamically initialized global is always valid.
1721 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1722 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1723 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1724 NumOptimizedAccessesToGlobalVar++;
1725 return;
1726 }
1727 }
1728
1729 if (ClOpt && ClOptStack) {
1730 // A direct inbounds access to a stack variable is always valid.
1731 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1732 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1733 NumOptimizedAccessesToStackVar++;
1734 return;
1735 }
1736 }
1737
1738 if (O.IsWrite)
1739 NumInstrumentedWrites++;
1740 else
1741 NumInstrumentedReads++;
1742
1743 unsigned Granularity = 1 << Mapping.Scale;
1744 if (O.MaybeMask) {
1745 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1746 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1747 Granularity, O.OpType, O.IsWrite, nullptr,
1748 UseCalls, Exp, RTCI);
1749 } else {
1750 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1751 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1752 UseCalls, Exp, RTCI);
1753 }
1754}
1755
1756Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1757 Value *Addr, bool IsWrite,
1758 size_t AccessSizeIndex,
1759 Value *SizeArgument,
1760 uint32_t Exp,
1761 RuntimeCallInserter &RTCI) {
1762 InstrumentationIRBuilder IRB(InsertBefore);
1763 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1764 CallInst *Call = nullptr;
1765 if (SizeArgument) {
1766 if (Exp == 0)
1767 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1768 {Addr, SizeArgument});
1769 else
1770 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1771 {Addr, SizeArgument, ExpVal});
1772 } else {
1773 if (Exp == 0)
1774 Call = RTCI.createRuntimeCall(
1775 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1776 else
1777 Call = RTCI.createRuntimeCall(
1778 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1779 }
1780
1781 Call->setCannotMerge();
1782 return Call;
1783}
1784
1785Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1786 Value *ShadowValue,
1787 uint32_t TypeStoreSize) {
1788 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1789 // Addr & (Granularity - 1)
1790 Value *LastAccessedByte =
1791 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1792 // (Addr & (Granularity - 1)) + size - 1
1793 if (TypeStoreSize / 8 > 1)
1794 LastAccessedByte = IRB.CreateAdd(
1795 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1796 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1797 LastAccessedByte =
1798 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1799 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1800 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1801}
1802
1803Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1804 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1805 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1806 // Do not instrument unsupported addrspaces.
1807 if (isUnsupportedAMDGPUAddrspace(Addr))
1808 return nullptr;
1809 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1810 // Follow host instrumentation for global and constant addresses.
1811 if (PtrTy->getPointerAddressSpace() != 0)
1812 return InsertBefore;
1813 // Instrument generic addresses in supported address spaces.
1814 IRBuilder<> IRB(InsertBefore);
1815 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1816 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1817 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1818 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1819 Value *AddrSpaceZeroLanding =
1820 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1821 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1822 return InsertBefore;
1823}
1824
1825Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1826 Value *Cond, bool Recover) {
1827 Module &M = *IRB.GetInsertBlock()->getModule();
1828 Value *ReportCond = Cond;
1829 if (!Recover) {
1830 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1831 IRB.getInt1Ty());
1832 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1833 }
1834
1835 auto *Trm =
1836 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1837 MDBuilder(M.getContext()).createUnlikelyBranchWeights());
1838 Trm->getParent()->setName("asan.report");
1839
1840 if (Recover)
1841 return Trm;
1842
1843 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1844 IRB.SetInsertPoint(Trm);
1845 return IRB.CreateCall(
1846 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1847}
1848
1849void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1850 Instruction *InsertBefore, Value *Addr,
1851 MaybeAlign Alignment,
1852 uint32_t TypeStoreSize, bool IsWrite,
1853 Value *SizeArgument, bool UseCalls,
1854 uint32_t Exp,
1855 RuntimeCallInserter &RTCI) {
1856 if (TargetTriple.isAMDGPU()) {
1857 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1858 TypeStoreSize, IsWrite, SizeArgument);
1859 if (!InsertBefore)
1860 return;
1861 }
1862
1863 InstrumentationIRBuilder IRB(InsertBefore);
1864 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1865
1866 if (UseCalls && ClOptimizeCallbacks) {
1867 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1868 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1869 IRB.CreateCall(
1870 Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
1871 {IRB.CreatePointerCast(Addr, PtrTy),
1872 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1873 return;
1874 }
1875
1876 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1877 if (UseCalls) {
1878 if (Exp == 0)
1879 RTCI.createRuntimeCall(
1880 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1881 else
1882 RTCI.createRuntimeCall(
1883 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1884 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1885 return;
1886 }
1887
1888 Type *ShadowTy =
1889 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1890 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
1891 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1892 const uint64_t ShadowAlign =
1893 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1894 Value *ShadowValue = IRB.CreateAlignedLoad(
1895 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1896
1897 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1898 size_t Granularity = 1ULL << Mapping.Scale;
1899 Instruction *CrashTerm = nullptr;
1900
1901 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1902
1903 if (TargetTriple.isAMDGCN()) {
1904 if (GenSlowPath) {
1905 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1906 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1907 }
1908 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1909 } else if (GenSlowPath) {
1910 // We use branch weights for the slow path check, to indicate that the slow
1911 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1912 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1913 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1914 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1915 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1916 IRB.SetInsertPoint(CheckTerm);
1917 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1918 if (Recover) {
1919 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1920 } else {
1921 BasicBlock *CrashBlock =
1922 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1923 CrashTerm = new UnreachableInst(*C, CrashBlock);
1924 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1925 ReplaceInstWithInst(CheckTerm, NewTerm);
1926 }
1927 } else {
1928 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1929 }
1930
1931 Instruction *Crash = generateCrashCode(
1932 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1933 if (OrigIns->getDebugLoc())
1934 Crash->setDebugLoc(OrigIns->getDebugLoc());
1935}
1936
1937// Instrument unusual size or unusual alignment.
1938 // We cannot do it with a single check, so we do a 1-byte check for the first
1939 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1940// to report the actual access size.
1941void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1942 Instruction *I, Instruction *InsertBefore, Value *Addr,
1943 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
1944 uint32_t Exp, RuntimeCallInserter &RTCI) {
1945 InstrumentationIRBuilder IRB(InsertBefore);
1946 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
1947 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
1948
1949 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1950 if (UseCalls) {
1951 if (Exp == 0)
1952 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1953 {AddrLong, Size});
1954 else
1955 RTCI.createRuntimeCall(
1956 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
1957 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1958 } else {
1959 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
1960 Value *LastByte = IRB.CreateIntToPtr(
1961 IRB.CreateAdd(AddrLong, SizeMinusOne),
1962 Addr->getType());
1963 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
1964 RTCI);
1965 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
1966 Exp, RTCI);
1967 }
1968}
1969
1970void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
1971 // Set up the arguments to our poison/unpoison functions.
1972 IRBuilder<> IRB(&GlobalInit.front(),
1973 GlobalInit.front().getFirstInsertionPt());
1974
1975 // Add a call to poison all external globals before the given function starts.
1976 Value *ModuleNameAddr =
1977 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
1978 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1979
1980 // Add calls to unpoison all globals before each return instruction.
1981 for (auto &BB : GlobalInit)
1982 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1983 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
1984}
1985
1986void ModuleAddressSanitizer::createInitializerPoisonCalls() {
1987 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1988 if (!GV)
1989 return;
1990
1991 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1992 if (!CA)
1993 return;
1994
1995 for (Use &OP : CA->operands()) {
1996 if (isa<ConstantAggregateZero>(OP)) continue;
1997 ConstantStruct *CS = cast<ConstantStruct>(OP);
1998
1999 // Must have a function or null ptr.
2000 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2001 if (F->getName() == kAsanModuleCtorName) continue;
2002 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2003 // Don't instrument CTORs that will run before asan.module_ctor.
2004 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2005 continue;
2006 poisonOneInitializer(*F);
2007 }
2008 }
2009}
2010
2011const GlobalVariable *
2012ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2013 // In case this function should be expanded to include rules that do not just
2014 // apply when CompileKernel is true, either guard all existing rules with an
2015 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2016 // should also apply to user space.
2017 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2018
2019 const Constant *C = GA.getAliasee();
2020
2021 // When compiling the kernel, globals that are aliased by symbols prefixed
2022 // by "__" are special and cannot be padded with a redzone.
2023 if (GA.getName().starts_with("__"))
2024 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2025
2026 return nullptr;
2027}
2028
2029bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2030 Type *Ty = G->getValueType();
2031 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2032
2033 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2034 return false;
2035 if (!Ty->isSized()) return false;
2036 if (!G->hasInitializer()) return false;
2037 // Globals in address space 1 and 4 are supported for AMDGPU.
2038 if (G->getAddressSpace() &&
2039 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2040 return false;
2041 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2042 // Two problems with thread-locals:
2043 // - The address of the main thread's copy can't be computed at link-time.
2044 // - Need to poison all copies, not just the main thread's one.
2045 if (G->isThreadLocal()) return false;
2046 // For now, just ignore this Global if the alignment is large.
2047 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2048
2049 // For non-COFF targets, only instrument globals known to be defined by this
2050 // TU.
2051 // FIXME: We can instrument comdat globals on ELF if we are using the
2052 // GC-friendly metadata scheme.
2053 if (!TargetTriple.isOSBinFormatCOFF()) {
2054 if (!G->hasExactDefinition() || G->hasComdat())
2055 return false;
2056 } else {
2057 // On COFF, don't instrument non-ODR linkages.
2058 if (G->isInterposable())
2059 return false;
2060 // If the global has AvailableExternally linkage, then it is not in this
2061 // module, which means it does not need to be instrumented.
2062 if (G->hasAvailableExternallyLinkage())
2063 return false;
2064 }
2065
2066 // If a comdat is present, it must have a selection kind that implies ODR
2067 // semantics: no duplicates, any, or exact match.
2068 if (Comdat *C = G->getComdat()) {
2069 switch (C->getSelectionKind()) {
2070 case Comdat::Any:
2071 case Comdat::ExactMatch:
2072 case Comdat::NoDeduplicate:
2073 break;
2074 case Comdat::Largest:
2075 case Comdat::SameSize:
2076 return false;
2077 }
2078 }
2079
2080 if (G->hasSection()) {
2081 // The kernel uses explicit sections mostly for special global variables
2082 // that we should not instrument. E.g. the kernel may rely on their layout
2083 // without redzones, or remove them at link time ("discard.*"), etc.
2084 if (CompileKernel)
2085 return false;
2086
2087 StringRef Section = G->getSection();
2088
2089 // Globals from llvm.metadata aren't emitted, do not instrument them.
2090 if (Section == "llvm.metadata") return false;
2091 // Do not instrument globals from special LLVM sections.
2092 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2093 return false;
2094
2095 // Do not instrument function pointers to initialization and termination
2096 // routines: dynamic linker will not properly handle redzones.
2097 if (Section.starts_with(".preinit_array") ||
2098 Section.starts_with(".init_array") ||
2099 Section.starts_with(".fini_array")) {
2100 return false;
2101 }
2102
2103 // Do not instrument user-defined sections (with names resembling
2104 // valid C identifiers).
2105 if (TargetTriple.isOSBinFormatELF()) {
2106 if (llvm::all_of(Section,
2107 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2108 return false;
2109 }
2110
2111 // On COFF, if the section name contains '$', it is highly likely that the
2112 // user is using section sorting to create an array of globals similar to
2113 // the way initialization callbacks are registered in .init_array and
2114 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2115 // to such globals is counterproductive, because the intent is that they
2116 // will form an array, and out-of-bounds accesses are expected.
2117 // See https://github.com/google/sanitizers/issues/305
2118 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2119 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2120 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2121 << *G << "\n");
2122 return false;
2123 }
2124
2125 if (TargetTriple.isOSBinFormatMachO()) {
2126 StringRef ParsedSegment, ParsedSection;
2127 unsigned TAA = 0, StubSize = 0;
2128 bool TAAParsed;
2129 cantFail(MCSectionMachO::ParseSectionSpecifier(
2130 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2131
2132 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2133 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2134 // them.
2135 if (ParsedSegment == "__OBJC" ||
2136 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2137 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2138 return false;
2139 }
2140 // See https://github.com/google/sanitizers/issues/32
2141 // Constant CFString instances are compiled in the following way:
2142 // -- the string buffer is emitted into
2143 // __TEXT,__cstring,cstring_literals
2144 // -- the constant NSConstantString structure referencing that buffer
2145 // is placed into __DATA,__cfstring
2146 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2147 // Moreover, it causes the linker to crash on OS X 10.7
2148 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2149 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2150 return false;
2151 }
2152 // The linker merges the contents of cstring_literals and removes the
2153 // trailing zeroes.
2154 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2155 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2156 return false;
2157 }
2158 }
2159 }
2160
2161 if (CompileKernel) {
2162 // Globals that are prefixed by "__" are special and cannot be padded with a
2163 // redzone.
2164 if (G->getName().starts_with("__"))
2165 return false;
2166 }
2167
2168 return true;
2169}
2170
2171// On Mach-O platforms, we emit global metadata in a separate section of the
2172// binary in order to allow the linker to properly dead strip. This is only
2173// supported on recent versions of ld64.
2174bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2175 if (!TargetTriple.isOSBinFormatMachO())
2176 return false;
2177
2178 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2179 return true;
2180 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2181 return true;
2182 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2183 return true;
2184 if (TargetTriple.isDriverKit())
2185 return true;
2186 if (TargetTriple.isXROS())
2187 return true;
2188
2189 return false;
2190}
2191
2192StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2193 switch (TargetTriple.getObjectFormat()) {
2194 case Triple::COFF: return ".ASAN$GL";
2195 case Triple::ELF: return "asan_globals";
2196 case Triple::MachO: return "__DATA,__asan_globals,regular";
2197 case Triple::Wasm:
2198 case Triple::GOFF:
2199 case Triple::SPIRV:
2200 case Triple::XCOFF:
2201 case Triple::DXContainer:
2202 report_fatal_error(
2203 "ModuleAddressSanitizer not implemented for object file format");
2204 case Triple::UnknownObjectFormat:
2205 break;
2206 }
2207 llvm_unreachable("unsupported object format");
2208}
2209
2210void ModuleAddressSanitizer::initializeCallbacks() {
2211 IRBuilder<> IRB(*C);
2212
2213 // Declare our poisoning and unpoisoning functions.
2214 AsanPoisonGlobals =
2215 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2216 AsanUnpoisonGlobals =
2217 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2218
2219 // Declare functions that register/unregister globals.
2220 AsanRegisterGlobals = M.getOrInsertFunction(
2221 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2222 AsanUnregisterGlobals = M.getOrInsertFunction(
2223 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2224
2225 // Declare the functions that find globals in a shared object and then invoke
2226 // the (un)register function on them.
2227 AsanRegisterImageGlobals = M.getOrInsertFunction(
2228 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2229 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2230 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2231
2232 AsanRegisterElfGlobals =
2233 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2234 IntptrTy, IntptrTy, IntptrTy);
2235 AsanUnregisterElfGlobals =
2236 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2237 IntptrTy, IntptrTy, IntptrTy);
2238}
2239
2240// Put the metadata and the instrumented global in the same group. This ensures
2241// that the metadata is discarded if the instrumented global is discarded.
2242void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2243 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2244 Module &M = *G->getParent();
2245 Comdat *C = G->getComdat();
2246 if (!C) {
2247 if (!G->hasName()) {
2248 // If G is unnamed, it must be internal. Give it an artificial name
2249 // so we can put it in a comdat.
2250 assert(G->hasLocalLinkage());
2251 G->setName(genName("anon_global"));
2252 }
2253
2254 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2255 std::string Name = std::string(G->getName());
2256 Name += InternalSuffix;
2257 C = M.getOrInsertComdat(Name);
2258 } else {
2259 C = M.getOrInsertComdat(G->getName());
2260 }
2261
2262 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2263 // linkage to internal linkage so that a symbol table entry is emitted. This
2264 // is necessary in order to create the comdat group.
2265 if (TargetTriple.isOSBinFormatCOFF()) {
2266 C->setSelectionKind(Comdat::NoDeduplicate);
2267 if (G->hasPrivateLinkage())
2268 G->setLinkage(GlobalValue::InternalLinkage);
2269 }
2270 G->setComdat(C);
2271 }
2272
2273 assert(G->hasComdat());
2274 Metadata->setComdat(G->getComdat());
2275}
2276
2277// Create a separate metadata global and put it in the appropriate ASan
2278 // global registration section.
2279 GlobalVariable *
2280ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2281 StringRef OriginalName) {
2282 auto Linkage = TargetTriple.isOSBinFormatMachO()
2283 ? GlobalVariable::InternalLinkage
2284 : GlobalVariable::PrivateLinkage;
2285 GlobalVariable *Metadata = new GlobalVariable(
2286 M, Initializer->getType(), false, Linkage, Initializer,
2287 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2288 Metadata->setSection(getGlobalMetadataSection());
2289 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2290 // relocation pressure.
2291 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2292 return Metadata;
2293}
2294
2295Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2296 AsanDtorFunction = Function::createWithDefaultAttr(
2297 FunctionType::get(Type::getVoidTy(*C), false),
2298 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2299 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2300 // Ensure Dtor cannot be discarded, even if in a comdat.
2301 appendToUsed(M, {AsanDtorFunction});
2302 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2303
2304 return ReturnInst::Create(*C, AsanDtorBB);
2305}
2306
2307void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2308 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2309 ArrayRef<Constant *> MetadataInitializers) {
2310 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2311 auto &DL = M.getDataLayout();
2312
2313 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2314 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2315 Constant *Initializer = MetadataInitializers[i];
2316 GlobalVariable *G = ExtendedGlobals[i];
2317 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2318 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2319 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2320 MetadataGlobals[i] = Metadata;
2321
2322 // The MSVC linker always inserts padding when linking incrementally. We
2323 // cope with that by aligning each struct to its size, which must be a power
2324 // of two.
2325 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2326 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2327 "global metadata will not be padded appropriately");
2328 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2329
2330 SetComdatForGlobalMetadata(G, Metadata, "");
2331 }
2332
2333 // Update llvm.compiler.used, adding the new metadata globals. This is
2334 // needed so that during LTO these variables stay alive.
2335 if (!MetadataGlobals.empty())
2336 appendToCompilerUsed(M, MetadataGlobals);
2337}
2338
2339void ModuleAddressSanitizer::instrumentGlobalsELF(
2340 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2341 ArrayRef<Constant *> MetadataInitializers,
2342 const std::string &UniqueModuleId) {
2343 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2344
2345 // Putting globals in a comdat changes the semantics and can potentially cause
2346 // false-negative ODR violations at link time. If ODR indicators are used, we
2347 // keep the comdat sections, as link-time ODR violations will be detected on
2348 // the ODR indicator symbols.
2349 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2350
2351 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2352 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2353 GlobalVariable *G = ExtendedGlobals[i];
2354 GlobalVariable *Metadata =
2355 CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2356 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2357 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2358 MetadataGlobals[i] = Metadata;
2359
2360 if (UseComdatForGlobalsGC)
2361 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2362 }
2363
2364 // Update llvm.compiler.used, adding the new metadata globals. This is
2365 // needed so that during LTO these variables stay alive.
2366 if (!MetadataGlobals.empty())
2367 appendToCompilerUsed(M, MetadataGlobals);
2368
2369 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2370 // to look up the loaded image that contains it. Second, we can store in it
2371 // whether registration has already occurred, to prevent duplicate
2372 // registration.
2373 //
2374 // Common linkage ensures that there is only one global per shared library.
2375 GlobalVariable *RegisteredFlag = new GlobalVariable(
2376 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2377 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2378 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2379
2380 // Create start and stop symbols.
2381 GlobalVariable *StartELFMetadata = new GlobalVariable(
2382 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2383 "__start_" + getGlobalMetadataSection());
2385 GlobalVariable *StopELFMetadata = new GlobalVariable(
2386 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2387 "__stop_" + getGlobalMetadataSection());
2389
2390 // Create a call to register the globals with the runtime.
2391 if (ConstructorKind == AsanCtorKind::Global)
2392 IRB.CreateCall(AsanRegisterElfGlobals,
2393 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2394 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2395 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2396
2397 // We also need to unregister globals at the end, e.g., when a shared library
2398 // gets closed.
2399 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2400 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2401 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2402 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2403 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2404 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2405 }
2406}
2407
2408void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2409 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2410 ArrayRef<Constant *> MetadataInitializers) {
2411 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2412
2413 // On recent Mach-O platforms, use a structure which binds the liveness of
2414 // the global variable to the metadata struct. Keep a list of the "Liveness"
2415 // GVs created, to be added to llvm.compiler.used.
2416 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2417 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2418
2419 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2420 Constant *Initializer = MetadataInitializers[i];
2421 GlobalVariable *G = ExtendedGlobals[i];
2422 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2423
2424 // On recent Mach-O platforms, we emit the global metadata in a way that
2425 // allows the linker to properly strip dead globals.
2426 auto LivenessBinder =
2427 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2428 Initializer->getAggregateElement(1u));
2429 GlobalVariable *Liveness = new GlobalVariable(
2430 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2431 Twine("__asan_binder_") + G->getName());
2432 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2433 LivenessGlobals[i] = Liveness;
2434 }
2435
2436 // Update llvm.compiler.used, adding the new liveness globals. This is
2437 // needed so that during LTO these variables stay alive. The alternative
2438 // would be to have the linker handle the LTO symbols, but libLTO's
2439 // current API does not expose access to the section for each symbol.
2440 if (!LivenessGlobals.empty())
2441 appendToCompilerUsed(M, LivenessGlobals);
2442
2443 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2444 // to look up the loaded image that contains it. Second, we can store in it
2445 // whether registration has already occurred, to prevent duplicate
2446 // registration.
2447 //
2448 // Common linkage ensures that there is only one global per shared library.
2449 GlobalVariable *RegisteredFlag = new GlobalVariable(
2450 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2451 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2452 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2453
2454 if (ConstructorKind == AsanCtorKind::Global)
2455 IRB.CreateCall(AsanRegisterImageGlobals,
2456 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2457
2458 // We also need to unregister globals at the end, e.g., when a shared library
2459 // gets closed.
2460 if (DestructorKind != AsanDtorKind::None) {
2461 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2462 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2463 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2464 }
2465}
2466
2467void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2468 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2469 ArrayRef<Constant *> MetadataInitializers) {
2470 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2471 unsigned N = ExtendedGlobals.size();
2472 assert(N > 0);
2473
2474 // On platforms that don't have a custom metadata section, we emit an array
2475 // of global metadata structures.
2476 ArrayType *ArrayOfGlobalStructTy =
2477 ArrayType::get(MetadataInitializers[0]->getType(), N);
2478 auto AllGlobals = new GlobalVariable(
2479 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2480 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2481 if (Mapping.Scale > 3)
2482 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2483
2484 if (ConstructorKind == AsanCtorKind::Global)
2485 IRB.CreateCall(AsanRegisterGlobals,
2486 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2487 ConstantInt::get(IntptrTy, N)});
2488
2489 // We also need to unregister globals at the end, e.g., when a shared library
2490 // gets closed.
2491 if (DestructorKind != AsanDtorKind::None) {
2492 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2493 IrbDtor.CreateCall(AsanUnregisterGlobals,
2494 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2495 ConstantInt::get(IntptrTy, N)});
2496 }
2497}
2498
2499// This function replaces all global variables with new variables that have
2500// trailing redzones. It also creates a function that poisons
2501// redzones and inserts this function into llvm.global_ctors.
2502// Sets *CtorComdat to true if the global registration code emitted into the
2503// asan constructor is comdat-compatible.
2504void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
2505 bool *CtorComdat) {
2506 // Build set of globals that are aliased by some GA, where
2507 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2508 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2509 if (CompileKernel) {
2510 for (auto &GA : M.aliases()) {
2511 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2512 AliasedGlobalExclusions.insert(GV);
2513 }
2514 }
2515
2516 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2517 for (auto &G : M.globals()) {
2518 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2519 GlobalsToChange.push_back(&G);
2520 }
2521
2522 size_t n = GlobalsToChange.size();
2523 auto &DL = M.getDataLayout();
2524
2525 // A global is described by a structure
2526 // size_t beg;
2527 // size_t size;
2528 // size_t size_with_redzone;
2529 // const char *name;
2530 // const char *module_name;
2531 // size_t has_dynamic_init;
2532 // size_t padding_for_windows_msvc_incremental_link;
2533 // size_t odr_indicator;
2534 // We initialize an array of such structures and pass it to a run-time call.
2535 StructType *GlobalStructTy =
2536 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2537 IntptrTy, IntptrTy, IntptrTy);
2538 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2539 SmallVector<Constant *, 16> Initializers(n);
2540
2541 for (size_t i = 0; i < n; i++) {
2542 GlobalVariable *G = GlobalsToChange[i];
2543
2544 GlobalValue::SanitizerMetadata MD;
2545 if (G->hasSanitizerMetadata())
2546 MD = G->getSanitizerMetadata();
2547
2548 // The runtime library tries demangling symbol names in the descriptor but
2549 // functionality like __cxa_demangle may be unavailable (e.g.
2550 // -static-libstdc++). So we demangle the symbol names here.
2551 std::string NameForGlobal = G->getName().str();
2552 GlobalVariable *Name =
2553 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2554 /*AllowMerging*/ true, genName("global"));
2555
2556 Type *Ty = G->getValueType();
2557 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2558 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2559 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2560
2561 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2562 Constant *NewInitializer = ConstantStruct::get(
2563 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2564
2565 // Create a new global variable with enough space for a redzone.
2566 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2567 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2568 Linkage = GlobalValue::InternalLinkage;
2569 GlobalVariable *NewGlobal = new GlobalVariable(
2570 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2571 G->getThreadLocalMode(), G->getAddressSpace());
2572 NewGlobal->copyAttributesFrom(G);
2573 NewGlobal->setComdat(G->getComdat());
2574 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2575 // Don't fold globals with redzones. ODR violation detector and redzone
2576 // poisoning implicitly creates a dependence on the global's address, so it
2577 // is no longer valid for it to be marked unnamed_addr.
2578 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2579
2580 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2581 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2582 G->isConstant()) {
2583 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2584 if (Seq && Seq->isCString())
2585 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2586 }
2587
2588 // Transfer the debug info and type metadata. The payload starts at offset
2589 // zero so we can copy the metadata over as is.
2590 NewGlobal->copyMetadata(G, 0);
2591
2592 Value *Indices2[2];
2593 Indices2[0] = IRB.getInt32(0);
2594 Indices2[1] = IRB.getInt32(0);
2595
2596 G->replaceAllUsesWith(
2597 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2598 NewGlobal->takeName(G);
2599 G->eraseFromParent();
2600 NewGlobals[i] = NewGlobal;
2601
2602 Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
2603 GlobalValue *InstrumentedGlobal = NewGlobal;
2604
2605 bool CanUsePrivateAliases =
2606 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2607 TargetTriple.isOSBinFormatWasm();
2608 if (CanUsePrivateAliases && UsePrivateAlias) {
2609 // Create local alias for NewGlobal to avoid crash on ODR between
2610 // instrumented and non-instrumented libraries.
2611 InstrumentedGlobal =
2612 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2613 }
2614
2615 // ODR should not happen for local linkage.
2616 if (NewGlobal->hasLocalLinkage()) {
2617 ODRIndicator =
2618 ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
2619 } else if (UseOdrIndicator) {
2620 // With local aliases, we need to provide another externally visible
2621 // symbol __odr_asan_XXX to detect ODR violation.
2622 auto *ODRIndicatorSym =
2623 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2624 ConstantInt::get(IRB.getInt8Ty(), 0),
2625 kODRGenPrefix + NameForGlobal, nullptr,
2626 NewGlobal->getThreadLocalMode());
2627
2628 // Set meaningful attributes for indicator symbol.
2629 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2630 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2631 ODRIndicatorSym->setAlignment(Align(1));
2632 ODRIndicator = ODRIndicatorSym;
2633 }
2634
2635 Constant *Initializer = ConstantStruct::get(
2636 GlobalStructTy,
2637 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2638 ConstantInt::get(IntptrTy, SizeInBytes),
2639 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2640 ConstantExpr::getPointerCast(Name, IntptrTy),
2641 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
2642 ConstantInt::get(IntptrTy, MD.IsDynInit),
2643 Constant::getNullValue(IntptrTy),
2644 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2645
2646 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2647
2648 Initializers[i] = Initializer;
2649 }
2650
2651 // Add instrumented globals to the llvm.compiler.used list to prevent LTO from
2652 // ConstantMerge'ing them.
2653 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2654 for (size_t i = 0; i < n; i++) {
2655 GlobalVariable *G = NewGlobals[i];
2656 if (G->getName().empty()) continue;
2657 GlobalsToAddToUsedList.push_back(G);
2658 }
2659 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2660
2661 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2662 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2663 // linkage unit will only have one module constructor, and (b) the register
2664 // function will be called. The module destructor is not created when n ==
2665 // 0.
2666 *CtorComdat = true;
2667 instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
2668 } else if (n == 0) {
2669 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2670 // all compile units will have identical module constructor/destructor.
2671 *CtorComdat = TargetTriple.isOSBinFormatELF();
2672 } else {
2673 *CtorComdat = false;
2674 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2675 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2676 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2677 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2678 } else {
2679 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2680 }
2681 }
2682
2683 // Create calls for poisoning before initializers run and unpoisoning after.
2684 if (ClInitializers)
2685 createInitializerPoisonCalls();
2686
2687 LLVM_DEBUG(dbgs() << M);
2688}
2689
2690 uint64_t
2691ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2692 constexpr uint64_t kMaxRZ = 1 << 18;
2693 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2694
2695 uint64_t RZ = 0;
2696 if (SizeInBytes <= MinRZ / 2) {
2697 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2698 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2699 // half of MinRZ.
2700 RZ = MinRZ - SizeInBytes;
2701 } else {
2702 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2703 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2704
2705 // Round up to multiple of MinRZ.
2706 if (SizeInBytes % MinRZ)
2707 RZ += MinRZ - (SizeInBytes % MinRZ);
2708 }
2709
2710 assert((RZ + SizeInBytes) % MinRZ == 0);
2711
2712 return RZ;
2713}
2714
2715int ModuleAddressSanitizer::GetAsanVersion() const {
2716 int LongSize = M.getDataLayout().getPointerSizeInBits();
2717 bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2718 int Version = 8;
2719 // 32-bit Android is one version ahead because of the switch to dynamic
2720 // shadow.
2721 Version += (LongSize == 32 && isAndroid);
2722 return Version;
2723}
2724
2725GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2726 if (!ModuleName) {
2727 // We shouldn't merge identical module names, as this string serves as a
2728 // unique module ID at runtime.
2729 ModuleName =
2730 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2731 /*AllowMerging*/ false, genName("module"));
2732 }
2733 return ModuleName;
2734}
2735
2736bool ModuleAddressSanitizer::instrumentModule() {
2737 initializeCallbacks();
2738
2739 // Create a module constructor. A destructor is created lazily because not
2740 // all platforms and not all modules need it.
2741 if (ConstructorKind == AsanCtorKind::Global) {
2742 if (CompileKernel) {
2743 // The kernel always builds with its own runtime, and therefore does not
2744 // need the init and version check calls.
2745 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2746 } else {
2747 std::string AsanVersion = std::to_string(GetAsanVersion());
2748 std::string VersionCheckName =
2749 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2750 std::tie(AsanCtorFunction, std::ignore) =
2751 createSanitizerCtorAndInitFunctions(
2752 M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
2753 /*InitArgs=*/{}, VersionCheckName);
2754 }
2755 }
2756
2757 bool CtorComdat = true;
2758 if (ClGlobals) {
2759 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2760 if (AsanCtorFunction) {
2761 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2762 instrumentGlobals(IRB, &CtorComdat);
2763 } else {
2764 IRBuilder<> IRB(*C);
2765 instrumentGlobals(IRB, &CtorComdat);
2766 }
2767 }
2768
2769 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2770
2771 // Put the constructor and destructor in comdat if both
2772 // (1) global instrumentation is not TU-specific
2773 // (2) target is ELF.
2774 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2775 if (AsanCtorFunction) {
2776 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2777 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2778 }
2779 if (AsanDtorFunction) {
2780 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2781 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2782 }
2783 } else {
2784 if (AsanCtorFunction)
2785 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2786 if (AsanDtorFunction)
2787 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2788 }
2789
2790 return true;
2791}
2792
2793void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
2794 IRBuilder<> IRB(*C);
2795 // Create __asan_report* callbacks.
2796 // IsWrite, TypeSize and Exp are encoded in the function name.
2797 for (int Exp = 0; Exp < 2; Exp++) {
2798 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2799 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2800 const std::string ExpStr = Exp ? "exp_" : "";
2801 const std::string EndingStr = Recover ? "_noabort" : "";
2802
2803 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2804 SmallVector<Type *, 2> Args1{1, IntptrTy};
2805 AttributeList AL2;
2806 AttributeList AL1;
2807 if (Exp) {
2808 Type *ExpType = Type::getInt32Ty(*C);
2809 Args2.push_back(ExpType);
2810 Args1.push_back(ExpType);
2811 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2812 AL2 = AL2.addParamAttribute(*C, 2, AK);
2813 AL1 = AL1.addParamAttribute(*C, 1, AK);
2814 }
2815 }
2816 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2817 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2818 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2819
2820 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2821 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2822 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2823
2824 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2825 AccessSizeIndex++) {
2826 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2827 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2828 M.getOrInsertFunction(
2829 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2830 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2831
2832 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2833 M.getOrInsertFunction(
2834 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2835 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2836 }
2837 }
2838 }
2839
2840 const std::string MemIntrinCallbackPrefix =
2841 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2842 ? std::string("")
2844 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2845 PtrTy, PtrTy, PtrTy, IntptrTy);
2846 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2847 PtrTy, PtrTy, IntptrTy);
2848 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2849 TLI->getAttrList(C, {1}, /*Signed=*/false),
2850 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2851
2852 AsanHandleNoReturnFunc =
2853 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2854
2855 AsanPtrCmpFunction =
2856 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2857 AsanPtrSubFunction =
2858 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2859 if (Mapping.InGlobal)
2860 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2861 ArrayType::get(IRB.getInt8Ty(), 0));
2862
2863 AMDGPUAddressShared =
2864 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2865 AMDGPUAddressPrivate =
2866 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2867}
2868
2869bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2870 // For each NSObject descendant having a +load method, this method is invoked
2871 // by the ObjC runtime before any of the static constructors is called.
2872 // Therefore we need to instrument such methods with a call to __asan_init
2873 // at the beginning in order to initialize our runtime before any access to
2874 // the shadow memory.
2875 // We cannot just ignore these methods, because they may call other
2876 // instrumented functions.
2877 if (F.getName().contains(" load]")) {
2878 FunctionCallee AsanInitFunction =
2879 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2880 IRBuilder<> IRB(&F.front(), F.front().begin());
2881 IRB.CreateCall(AsanInitFunction, {});
2882 return true;
2883 }
2884 return false;
2885}
2886
2887bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2888 // Generate code only when dynamic addressing is needed.
2889 if (Mapping.Offset != kDynamicShadowSentinel)
2890 return false;
2891
2892 IRBuilder<> IRB(&F.front().front());
2893 if (Mapping.InGlobal) {
2894 if (ClWithIfuncSuppressRemat) {
2895 // An empty inline asm with input reg == output reg.
2896 // An opaque pointer-to-int cast, basically.
2897 InlineAsm *Asm = InlineAsm::get(
2898 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2899 StringRef(""), StringRef("=r,0"),
2900 /*hasSideEffects=*/false);
2901 LocalDynamicShadow =
2902 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2903 } else {
2904 LocalDynamicShadow =
2905 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2906 }
2907 } else {
2908 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2909 kAsanShadowMemoryDynamicAddress, IntptrTy);
2910 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2911 }
2912 return true;
2913}
2914
2915void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2916 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2917 // to it as uninteresting. This assumes we haven't started processing allocas
2918 // yet. This check is done up front because iterating the use list in
2919 // isInterestingAlloca would be algorithmically slower.
2920 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2921
2922 // Try to get the declaration of llvm.localescape. If it's not in the module,
2923 // we can exit early.
2924 if (!F.getParent()->getFunction("llvm.localescape")) return;
2925
2926 // Look for a call to llvm.localescape in the entry block. It can't be in
2927 // any other block.
2928 for (Instruction &I : F.getEntryBlock()) {
2929 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2930 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2931 // We found a call. Mark all the allocas passed in as uninteresting.
2932 for (Value *Arg : II->args()) {
2933 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2934 assert(AI && AI->isStaticAlloca() &&
2935 "non-static alloca arg to localescape");
2936 ProcessedAllocas[AI] = false;
2937 }
2938 break;
2939 }
2940 }
2941}
2942
2943bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2944 bool ShouldInstrument =
2945 ClDebugMin < 0 || ClDebugMax < 0 ||
2946 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2947 Instrumented++;
2948 return !ShouldInstrument;
2949}
2950
2951bool AddressSanitizer::instrumentFunction(Function &F,
2952 const TargetLibraryInfo *TLI) {
2953 if (F.empty())
2954 return false;
2955 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
2956 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
2957 if (F.getName().starts_with("__asan_")) return false;
2958 if (F.isPresplitCoroutine())
2959 return false;
2960
2961 bool FunctionModified = false;
2962
2963 // If needed, insert __asan_init before checking for SanitizeAddress attr.
2964 // This function needs to be called even if the function body is not
2965 // instrumented.
2966 if (maybeInsertAsanInitAtFunctionEntry(F))
2967 FunctionModified = true;
2968
2969 // Leave if the function doesn't need instrumentation.
2970 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
2971
2972 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
2973 return FunctionModified;
2974
2975 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
2976
2977 initializeCallbacks(TLI);
2978
2979 FunctionStateRAII CleanupObj(this);
2980
2981 RuntimeCallInserter RTCI(F);
2982
2983 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
2984
2985 // We can't instrument allocas used with llvm.localescape. Only static allocas
2986 // can be passed to that intrinsic.
2987 markEscapedLocalAllocas(F);
2988
2989 // We want to instrument every address only once per basic block (unless there
2990 // are calls between uses).
2991 SmallPtrSet<Value *, 16> TempsToInstrument;
2992 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
2993 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
2994 SmallVector<Instruction *, 8> NoReturnCalls;
2995 SmallVector<BasicBlock *, 16> AllBlocks;
2996 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
2997
2998 // Fill the set of memory operations to instrument.
2999 for (auto &BB : F) {
3000 AllBlocks.push_back(&BB);
3001 TempsToInstrument.clear();
3002 int NumInsnsPerBB = 0;
3003 for (auto &Inst : BB) {
3004 if (LooksLikeCodeInBug11395(&Inst)) return false;
3005 // Skip instructions inserted by another instrumentation.
3006 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3007 continue;
3008 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3009 getInterestingMemoryOperands(&Inst, InterestingOperands);
3010
3011 if (!InterestingOperands.empty()) {
3012 for (auto &Operand : InterestingOperands) {
3013 if (ClOpt && ClOptSameTemp) {
3014 Value *Ptr = Operand.getPtr();
3015 // If we have a mask, skip instrumentation if we've already
3016 // instrumented the full object. But don't add to TempsToInstrument
3017 // because we might get another load/store with a different mask.
3018 if (Operand.MaybeMask) {
3019 if (TempsToInstrument.count(Ptr))
3020 continue; // We've seen this (whole) temp in the current BB.
3021 } else {
3022 if (!TempsToInstrument.insert(Ptr).second)
3023 continue; // We've seen this temp in the current BB.
3024 }
3025 }
3026 OperandsToInstrument.push_back(Operand);
3027 NumInsnsPerBB++;
3028 }
3029 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3030 isInterestingPointerComparison(&Inst)) ||
3031 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3032 isInterestingPointerSubtraction(&Inst))) {
3033 PointerComparisonsOrSubtracts.push_back(&Inst);
3034 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3035 // ok, take it.
3036 IntrinToInstrument.push_back(MI);
3037 NumInsnsPerBB++;
3038 } else {
3039 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3040 // A call inside BB.
3041 TempsToInstrument.clear();
3042 if (CB->doesNotReturn())
3043 NoReturnCalls.push_back(CB);
3044 }
3045 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3046 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3047 }
3048 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3049 }
3050 }
3051
3052 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3053 OperandsToInstrument.size() + IntrinToInstrument.size() >
3054 (unsigned)InstrumentationWithCallsThreshold);
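// With the default -asan-instrumentation-with-call-threshold of 7000, a
// function containing more memory accesses than that is instrumented with
// __asan_load*/__asan_store* callbacks instead of inline shadow checks,
// trading runtime speed for code size.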
3055 const DataLayout &DL = F.getDataLayout();
3056 ObjectSizeOpts ObjSizeOpts;
3057 ObjSizeOpts.RoundToAlign = true;
3058 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);
3059
3060 // Instrument.
3061 int NumInstrumented = 0;
3062 for (auto &Operand : OperandsToInstrument) {
3063 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3064 instrumentMop(ObjSizeVis, Operand, UseCalls,
3065 F.getDataLayout(), RTCI);
3066 FunctionModified = true;
3067 }
3068 for (auto *Inst : IntrinToInstrument) {
3069 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3070 instrumentMemIntrinsic(Inst, RTCI);
3071 FunctionModified = true;
3072 }
3073
3074 FunctionStackPoisoner FSP(F, *this, RTCI);
3075 bool ChangedStack = FSP.runOnFunction();
3076
3077 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3078 // See e.g. https://github.com/google/sanitizers/issues/37
3079 for (auto *CI : NoReturnCalls) {
3080 IRBuilder<> IRB(CI);
3081 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3082 }
3083
3084 for (auto *Inst : PointerComparisonsOrSubtracts) {
3085 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3086 FunctionModified = true;
3087 }
3088
3089 if (ChangedStack || !NoReturnCalls.empty())
3090 FunctionModified = true;
3091
3092 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3093 << F << "\n");
3094
3095 return FunctionModified;
3096}
3097
3098// Workaround for bug 11395: we don't want to instrument stack in functions
3099// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
3100// FIXME: remove once the bug 11395 is fixed.
3101bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3102 if (LongSize != 32) return false;
3103 CallInst *CI = dyn_cast<CallInst>(I);
3104 if (!CI || !CI->isInlineAsm()) return false;
3105 if (CI->arg_size() <= 5)
3106 return false;
3107 // We have inline assembly with quite a few arguments.
3108 return true;
3109}
3110
3111void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3112 IRBuilder<> IRB(*C);
3113 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3114 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3115 const char *MallocNameTemplate =
3116 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3117 ? kAsanStackMallocAlwaysNameTemplate
3118 : kAsanStackMallocNameTemplate;
3119 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3120 std::string Suffix = itostr(Index);
3121 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3122 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3123 AsanStackFreeFunc[Index] =
3124 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3125 IRB.getVoidTy(), IntptrTy, IntptrTy);
3126 }
3127 }
3128 if (ASan.UseAfterScope) {
3129 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3130 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3131 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3132 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3133 }
3134
3135 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3136 0xf3, 0xf5, 0xf8}) {
3137 std::ostringstream Name;
3138 Name << kAsanSetShadowPrefix;
3139 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3140 AsanSetShadowFunc[Val] =
3141 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3142 }
3143
3144 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3145 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3146 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3147 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3148}
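// The callbacks resolved above are runtime symbols of the form
// __asan_stack_malloc_<N> (or __asan_stack_malloc_always_<N> in Always mode)
// and __asan_stack_free_<N> for N in [0, kMaxAsanStackMallocSizeClass], plus
// one __asan_set_shadow_<xx> entry point per shadow byte value listed above
// (00..07, f1..f3, f5, f8).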
3149
3150void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3151 ArrayRef<uint8_t> ShadowBytes,
3152 size_t Begin, size_t End,
3153 IRBuilder<> &IRB,
3154 Value *ShadowBase) {
3155 if (Begin >= End)
3156 return;
3157
3158 const size_t LargestStoreSizeInBytes =
3159 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3160
3161 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3162
3163 // Poison the given range in shadow using the largest store size, without
3164 // leading and trailing zeros in ShadowMask. Zeros never change, so they
3165 // need neither poisoning nor up-poisoning. Still we don't mind if some of
3166 // them get into the middle of a store.
3167 for (size_t i = Begin; i < End;) {
3168 if (!ShadowMask[i]) {
3169 assert(!ShadowBytes[i]);
3170 ++i;
3171 continue;
3172 }
3173
3174 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3175 // Fit store size into the range.
3176 while (StoreSizeInBytes > End - i)
3177 StoreSizeInBytes /= 2;
3178
3179 // Minimize store size by trimming trailing zeros.
3180 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3181 while (j <= StoreSizeInBytes / 2)
3182 StoreSizeInBytes /= 2;
3183 }
3184
3185 uint64_t Val = 0;
3186 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3187 if (IsLittleEndian)
3188 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3189 else
3190 Val = (Val << 8) | ShadowBytes[i + j];
3191 }
3192
3193 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3194 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3195 IRB.CreateAlignedStore(
3196 Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
3197 Align(1));
3198
3199 i += StoreSizeInBytes;
3200 }
3201}
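// Worked example of the packing loop above: ShadowBytes {0xf1, 0xf1, 0x00,
// 0xf3} emitted as one 4-byte store yields Val == 0xf300f1f1 on a
// little-endian target and 0xf1f100f3 on a big-endian one.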
3202
3203void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3204 ArrayRef<uint8_t> ShadowBytes,
3205 IRBuilder<> &IRB, Value *ShadowBase) {
3206 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3207}
3208
3209void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3210 ArrayRef<uint8_t> ShadowBytes,
3211 size_t Begin, size_t End,
3212 IRBuilder<> &IRB, Value *ShadowBase) {
3213 assert(ShadowMask.size() == ShadowBytes.size());
3214 size_t Done = Begin;
3215 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3216 if (!ShadowMask[i]) {
3217 assert(!ShadowBytes[i]);
3218 continue;
3219 }
3220 uint8_t Val = ShadowBytes[i];
3221 if (!AsanSetShadowFunc[Val])
3222 continue;
3223
3224 // Skip same values.
3225 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3226 }
3227
3228 if (j - i >= ASan.MaxInlinePoisoningSize) {
3229 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3230 RTCI.createRuntimeCall(
3231 IRB, AsanSetShadowFunc[Val],
3232 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3233 ConstantInt::get(IntptrTy, j - i)});
3234 Done = j;
3235 }
3236 }
3237
3238 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3239}
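// Splitting policy implemented above: a run of identical shadow bytes at
// least ASan.MaxInlinePoisoningSize long (64 by default, see
// -asan-max-inline-poisoning-size) becomes a single __asan_set_shadow_<xx>
// call, provided that byte value has a registered callback; shorter
// stretches are flushed through copyToShadowInline as plain stores.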
3240
3241// Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
3242// each power of 2 from kMinStackMallocSize up to kMaxStackMallocSize.
3243static int StackMallocSizeClass(uint64_t LocalStackSize) {
3244 assert(LocalStackSize <= kMaxStackMallocSize);
3245 uint64_t MaxSize = kMinStackMallocSize;
3246 for (int i = 0;; i++, MaxSize *= 2)
3247 if (LocalStackSize <= MaxSize) return i;
3248 llvm_unreachable("impossible LocalStackSize");
3249}
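// For example, with kMinStackMallocSize == 64: sizes 1..64 map to class 0,
// 65..128 to class 1, and so on up to class 10 for kMaxStackMallocSize (64K),
// giving the 11 size classes mentioned above.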
3250
3251void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3252 Instruction *CopyInsertPoint = &F.front().front();
3253 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3254 // Insert after the dynamic shadow location is determined
3255 CopyInsertPoint = CopyInsertPoint->getNextNode();
3256 assert(CopyInsertPoint);
3257 }
3258 IRBuilder<> IRB(CopyInsertPoint);
3259 const DataLayout &DL = F.getDataLayout();
3260 for (Argument &Arg : F.args()) {
3261 if (Arg.hasByValAttr()) {
3262 Type *Ty = Arg.getParamByValType();
3263 const Align Alignment =
3264 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3265
3266 AllocaInst *AI = IRB.CreateAlloca(
3267 Ty, nullptr,
3268 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3269 ".byval");
3270 AI->setAlignment(Alignment);
3271 Arg.replaceAllUsesWith(AI);
3272
3273 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3274 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3275 }
3276 }
3277}
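// Rationale: redzones can only be placed around function-local storage, so
// each byval argument is copied into a fresh alloca (the "extra copy" that
// the -asan-redzone-byval-args flag description mentions), which the stack
// poisoner can then treat like any other interesting alloca.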
3278
3279PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3280 Value *ValueIfTrue,
3281 Instruction *ThenTerm,
3282 Value *ValueIfFalse) {
3283 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3284 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3285 PHI->addIncoming(ValueIfFalse, CondBlock);
3286 BasicBlock *ThenBlock = ThenTerm->getParent();
3287 PHI->addIncoming(ValueIfTrue, ThenBlock);
3288 return PHI;
3289}
3290
3291Value *FunctionStackPoisoner::createAllocaForLayout(
3292 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3293 AllocaInst *Alloca;
3294 if (Dynamic) {
3295 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3296 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3297 "MyAlloca");
3298 } else {
3299 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3300 nullptr, "MyAlloca");
3301 assert(Alloca->isStaticAlloca());
3302 }
3303 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3304 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3305 Alloca->setAlignment(Align(FrameAlignment));
3306 return IRB.CreatePointerCast(Alloca, IntptrTy);
3307}
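// Note: the frame alloca is aligned to max(L.FrameAlignment, ClRealignStack),
// i.e. to at least 32 bytes by default, which keeps the redzone offsets
// computed by ComputeASanStackFrameLayout aligned as the layout assumes.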
3308
3309void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3310 BasicBlock &FirstBB = *F.begin();
3311 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3312 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3313 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3314 DynamicAllocaLayout->setAlignment(Align(32));
3315}
3316
3317void FunctionStackPoisoner::processDynamicAllocas() {
3318 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3319 assert(DynamicAllocaPoisonCallVec.empty());
3320 return;
3321 }
3322
3323 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3324 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3325 assert(APC.InsBefore);
3326 assert(APC.AI);
3327 assert(ASan.isInterestingAlloca(*APC.AI));
3328 assert(!APC.AI->isStaticAlloca());
3329
3330 IRBuilder<> IRB(APC.InsBefore);
3331 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3332 // Dynamic allocas will be unpoisoned unconditionally below in
3333 // unpoisonDynamicAllocas.
3334 // Flag that we need to unpoison static allocas.
3335 }
3336
3337 // Handle dynamic allocas.
3338 createDynamicAllocasInitStorage();
3339 for (auto &AI : DynamicAllocaVec)
3340 handleDynamicAllocaCall(AI);
3341 unpoisonDynamicAllocas();
3342}
3343
3344/// Collect instructions in the entry block after \p InsBefore which initialize
3345/// permanent storage for a function argument. These instructions must remain in
3346/// the entry block so that uninitialized values do not appear in backtraces. An
3347/// added benefit is that this conserves spill slots. This does not move stores
3348/// before instrumented / "interesting" allocas.
3349static void findStoresToUninstrumentedArgAllocas(
3350 AddressSanitizer &ASan, Instruction &InsBefore,
3351 SmallVectorImpl<Instruction *> &InitInsts) {
3352 Instruction *Start = InsBefore.getNextNonDebugInstruction();
3353 for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
3354 // Argument initialization looks like:
3355 // 1) store <Argument>, <Alloca> OR
3356 // 2) <CastArgument> = cast <Argument> to ...
3357 // store <CastArgument> to <Alloca>
3358 // Do not consider any other kind of instruction.
3359 //
3360 // Note: This covers all known cases, but may not be exhaustive. An
3361 // alternative to pattern-matching stores is to DFS over all Argument uses:
3362 // this might be more general, but is probably much more complicated.
3363 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3364 continue;
3365 if (auto *Store = dyn_cast<StoreInst>(It)) {
3366 // The store destination must be an alloca that isn't interesting for
3367 // ASan to instrument. These are moved up before InsBefore, and they're
3368 // not interesting because allocas for arguments can be mem2reg'd.
3369 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3370 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3371 continue;
3372
3373 Value *Val = Store->getValueOperand();
3374 bool IsDirectArgInit = isa<Argument>(Val);
3375 bool IsArgInitViaCast =
3376 isa<CastInst>(Val) &&
3377 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3378 // Check that the cast appears directly before the store. Otherwise
3379 // moving the cast before InsBefore may break the IR.
3380 Val == It->getPrevNonDebugInstruction();
3381 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3382 if (!IsArgInit)
3383 continue;
3384
3385 if (IsArgInitViaCast)
3386 InitInsts.push_back(cast<Instruction>(Val));
3387 InitInsts.push_back(Store);
3388 continue;
3389 }
3390
3391 // Do not reorder past unknown instructions: argument initialization should
3392 // only involve casts and stores.
3393 return;
3394 }
3395}
3396
3397void FunctionStackPoisoner::processStaticAllocas() {
3398 if (AllocaVec.empty()) {
3399 assert(StaticAllocaPoisonCallVec.empty());
3400 return;
3401 }
3402
3403 int StackMallocIdx = -1;
3404 DebugLoc EntryDebugLocation;
3405 if (auto SP = F.getSubprogram())
3406 EntryDebugLocation =
3407 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3408
3409 Instruction *InsBefore = AllocaVec[0];
3410 IRBuilder<> IRB(InsBefore);
3411
3412 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3413 // debug info is broken, because only entry-block allocas are treated as
3414 // regular stack slots.
3415 auto InsBeforeB = InsBefore->getParent();
3416 assert(InsBeforeB == &F.getEntryBlock());
3417 for (auto *AI : StaticAllocasToMoveUp)
3418 if (AI->getParent() == InsBeforeB)
3419 AI->moveBefore(InsBefore);
3420
3421 // Move stores of arguments into entry-block allocas as well. This prevents
3422 // extra stack slots from being generated (to house the argument values until
3423 // they can be stored into the allocas). This also prevents uninitialized
3424 // values from being shown in backtraces.
3425 SmallVector<Instruction *, 8> ArgInitInsts;
3426 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3427 for (Instruction *ArgInitInst : ArgInitInsts)
3428 ArgInitInst->moveBefore(InsBefore);
3429
3430 // If we have a call to llvm.localescape, keep it in the entry block.
3431 if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
3432
3433 SmallVector<ASanStackVariableDescription, 16> SVD;
3434 SVD.reserve(AllocaVec.size());
3435 for (AllocaInst *AI : AllocaVec) {
3436 ASanStackVariableDescription D = {AI->getName().data(),
3437 ASan.getAllocaSizeInBytes(*AI),
3438 0,
3439 AI->getAlign().value(),
3440 AI,
3441 0,
3442 0};
3443 SVD.push_back(D);
3444 }
3445
3446 // Minimal header size (left redzone) is 4 pointers,
3447 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3448 uint64_t Granularity = 1ULL << Mapping.Scale;
3449 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3450 const ASanStackFrameLayout &L =
3451 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3452
3453 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3454 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3455 for (auto &Desc : SVD)
3456 AllocaToSVDMap[Desc.AI] = &Desc;
3457
3458 // Update SVD with information from lifetime intrinsics.
3459 for (const auto &APC : StaticAllocaPoisonCallVec) {
3460 assert(APC.InsBefore);
3461 assert(APC.AI);
3462 assert(ASan.isInterestingAlloca(*APC.AI));
3463 assert(APC.AI->isStaticAlloca());
3464
3465 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3466 Desc.LifetimeSize = Desc.Size;
3467 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3468 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3469 if (LifetimeLoc->getFile() == FnLoc->getFile())
3470 if (unsigned Line = LifetimeLoc->getLine())
3471 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3472 }
3473 }
3474 }
3475
3476 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3477 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3478 uint64_t LocalStackSize = L.FrameSize;
3479 bool DoStackMalloc =
3480 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3481 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3482 bool DoDynamicAlloca = ClDynamicAllocaStack;
3483 // Don't do dynamic alloca or stack malloc if:
3484 // 1) There is inline asm: too often it makes assumptions on which registers
3485 // are available.
3486 // 2) There is a returns_twice call (typically setjmp), which is
3487 // optimization-hostile, and doesn't play well with introduced indirect
3488 // register-relative calculation of local variable addresses.
3489 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3490 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3491
3492 Value *StaticAlloca =
3493 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3494
3495 Value *FakeStack;
3496 Value *LocalStackBase;
3497 Value *LocalStackBaseAlloca;
3498 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3499
3500 if (DoStackMalloc) {
3501 LocalStackBaseAlloca =
3502 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3503 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3504 // void *FakeStack = __asan_option_detect_stack_use_after_return
3505 // ? __asan_stack_malloc_N(LocalStackSize)
3506 // : nullptr;
3507 // void *LocalStackBase = (FakeStack) ? FakeStack :
3508 // alloca(LocalStackSize);
3509 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3510 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3511 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3512 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3513 Constant::getNullValue(IRB.getInt32Ty()));
3514 Instruction *Term =
3515 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3516 IRBuilder<> IRBIf(Term);
3517 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3518 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3519 Value *FakeStackValue =
3520 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3521 ConstantInt::get(IntptrTy, LocalStackSize));
3522 IRB.SetInsertPoint(InsBefore);
3523 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3524 ConstantInt::get(IntptrTy, 0));
3525 } else {
3526 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3527 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3528 // void *LocalStackBase = (FakeStack) ? FakeStack :
3529 // alloca(LocalStackSize);
3530 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3531 FakeStack =
3532 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3533 ConstantInt::get(IntptrTy, LocalStackSize));
3534 }
3535 Value *NoFakeStack =
3536 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3537 Instruction *Term =
3538 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3539 IRBuilder<> IRBIf(Term);
3540 Value *AllocaValue =
3541 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3542
3543 IRB.SetInsertPoint(InsBefore);
3544 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3545 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3546 DIExprFlags |= DIExpression::DerefBefore;
3547 } else {
3548 // void *FakeStack = nullptr;
3549 // void *LocalStackBase = alloca(LocalStackSize);
3550 FakeStack = ConstantInt::get(IntptrTy, 0);
3551 LocalStackBase =
3552 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3553 LocalStackBaseAlloca = LocalStackBase;
3554 }
3555
3556 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3557 // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3558 // later passes and can result in dropped variable coverage in debug info.
3559 Value *LocalStackBaseAllocaPtr =
3560 isa<PtrToIntInst>(LocalStackBaseAlloca)
3561 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3562 : LocalStackBaseAlloca;
3563 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3564 "Variable descriptions relative to ASan stack base will be dropped");
3565
3566 // Replace Alloca instructions with base+offset.
3567 for (const auto &Desc : SVD) {
3568 AllocaInst *AI = Desc.AI;
3569 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3570 Desc.Offset);
3571 Value *NewAllocaPtr = IRB.CreateIntToPtr(
3572 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3573 AI->getType());
3574 AI->replaceAllUsesWith(NewAllocaPtr);
3575 }
3576
3577 // The left-most redzone has enough space for at least 4 pointers.
3578 // Write the Magic value to redzone[0].
3579 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3580 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3581 BasePlus0);
3582 // Write the frame description constant to redzone[1].
3583 Value *BasePlus1 = IRB.CreateIntToPtr(
3584 IRB.CreateAdd(LocalStackBase,
3585 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3586 IntptrPtrTy);
3587 GlobalVariable *StackDescriptionGlobal =
3588 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3589 /*AllowMerging*/ true, genName("stack"));
3590 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3591 IRB.CreateStore(Description, BasePlus1);
3592 // Write the PC to redzone[2].
3593 Value *BasePlus2 = IRB.CreateIntToPtr(
3594 IRB.CreateAdd(LocalStackBase,
3595 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3596 IntptrPtrTy);
3597 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
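// After the three stores above, the left redzone header looks like:
// [base + 0*ptrsize] kCurrentStackFrameMagic
// [base + 1*ptrsize] pointer to the frame description string
// [base + 2*ptrsize] PC identifying the function (its address)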
3598
3599 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3600
3601 // Poison the stack red zones at the entry.
3602 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3603 // As the mask we must use the most poisoned case: redzones and after-scope.
3604 // As the bytes we can use either the same or just the redzones.
3605 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3606
3607 if (!StaticAllocaPoisonCallVec.empty()) {
3608 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3609
3610 // Poison static allocas near lifetime intrinsics.
3611 for (const auto &APC : StaticAllocaPoisonCallVec) {
3612 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3613 assert(Desc.Offset % L.Granularity == 0);
3614 size_t Begin = Desc.Offset / L.Granularity;
3615 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3616
3617 IRBuilder<> IRB(APC.InsBefore);
3618 copyToShadow(ShadowAfterScope,
3619 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3620 IRB, ShadowBase);
3621 }
3622 }
3623
3624 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3625 SmallVector<uint8_t, 64> ShadowAfterReturn;
3626
3627 // (Un)poison the stack before all ret instructions.
3628 for (Instruction *Ret : RetVec) {
3629 IRBuilder<> IRBRet(Ret);
3630 // Mark the current frame as retired.
3631 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3632 BasePlus0);
3633 if (DoStackMalloc) {
3634 assert(StackMallocIdx >= 0);
3635 // if FakeStack != 0 // LocalStackBase == FakeStack
3636 // // In use-after-return mode, poison the whole stack frame.
3637 // if StackMallocIdx <= 4
3638 // // For small sizes inline the whole thing:
3639 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3640 // **SavedFlagPtr(FakeStack) = 0
3641 // else
3642 // __asan_stack_free_N(FakeStack, LocalStackSize)
3643 // else
3644 // <This is not a fake stack; unpoison the redzones>
3645 Value *Cmp =
3646 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3647 Instruction *ThenTerm, *ElseTerm;
3648 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3649
3650 IRBuilder<> IRBPoison(ThenTerm);
3651 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3652 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3653 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3654 kAsanStackUseAfterReturnMagic);
3655 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3656 ShadowBase);
3657 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3658 FakeStack,
3659 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3660 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3661 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3662 IRBPoison.CreateStore(
3663 Constant::getNullValue(IRBPoison.getInt8Ty()),
3664 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3665 } else {
3666 // For larger frames call __asan_stack_free_*.
3667 RTCI.createRuntimeCall(
3668 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3669 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3670 }
3671
3672 IRBuilder<> IRBElse(ElseTerm);
3673 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3674 } else {
3675 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3676 }
3677 }
3678
3679 // We are done. Remove the old unused alloca instructions.
3680 for (auto *AI : AllocaVec)
3681 AI->eraseFromParent();
3682}
3683
3684void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3685 IRBuilder<> &IRB, bool DoPoison) {
3686 // For now just insert the call to ASan runtime.
3687 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3688 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3689 RTCI.createRuntimeCall(
3690 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3691 {AddrArg, SizeArg});
3692}
3693
3694// Handling llvm.lifetime intrinsics for a given %alloca:
3695// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3696// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3697// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3698// could be poisoned by previous llvm.lifetime.end instruction, as the
3699// variable may go in and out of scope several times, e.g. in loops).
3700// (3) if we poisoned at least one %alloca in a function,
3701// unpoison the whole stack frame at function exit.
3702void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3703 IRBuilder<> IRB(AI);
3704
3705 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3706 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3707
3708 Value *Zero = Constant::getNullValue(IntptrTy);
3709 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3710 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3711
3712 // Since we need to extend the alloca with additional memory to place
3713 // redzones, and the alloca's array size is a number of elements of
3714 // ElementSize bytes each, compute the allocated memory size in bytes
3715 // (OldSize) as ArraySize * ElementSize.
3716 const unsigned ElementSize =
3717 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3718 Value *OldSize =
3719 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3720 ConstantInt::get(IntptrTy, ElementSize));
3721
3722 // PartialSize = OldSize % 32
3723 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3724
3725 // Misalign = kAllocaRzSize - PartialSize;
3726 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3727
3728 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3729 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3730 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3731
3732 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3733 // Alignment is added to locate left redzone, PartialPadding for possible
3734 // partial redzone and kAllocaRzSize for right redzone respectively.
3735 Value *AdditionalChunkSize = IRB.CreateAdd(
3736 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3737 PartialPadding);
3738
3739 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3740
3741 // Insert new alloca with new NewSize and Alignment params.
3742 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3743 NewAlloca->setAlignment(Alignment);
3744
3745 // NewAddress = Address + Alignment
3746 Value *NewAddress =
3747 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3748 ConstantInt::get(IntptrTy, Alignment.value()));
3749
3750 // Insert __asan_alloca_poison call for the newly created alloca.
3751 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3752
3753 // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3754 // to unpoison the dynamic allocas on function exit.
3755 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3756
3757 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3758
3759 // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
3760 AI->replaceAllUsesWith(NewAddressPtr);
3761
3762 // We are done. Erase old alloca from parent.
3763 AI->eraseFromParent();
3764}
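// Worked example for the size arithmetic above, assuming kAllocaRzSize == 32
// and a final Alignment of 32: OldSize == 40 gives PartialSize == 8,
// Misalign == 24, PartialPadding == 24, so NewSize == 40 + (32 + 32 + 24) ==
// 128. The returned pointer is NewAlloca + 32: a 32-byte left redzone, then
// the 40 user bytes, then 24 + 32 bytes of partial and full right redzone.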
3765
3766// isSafeAccess returns true if Addr is always inbounds with respect to its
3767// base object. For example, it is a field access or an array access with
3768// constant inbounds index.
3769bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3770 Value *Addr, TypeSize TypeStoreSize) const {
3771 if (TypeStoreSize.isScalable())
3772 // TODO: We can use vscale_range to convert a scalable value to an
3773 // upper bound on the access size.
3774 return false;
3775
3776 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3777 if (!SizeOffset.bothKnown())
3778 return false;
3779
3780 uint64_t Size = SizeOffset.Size.getZExtValue();
3781 int64_t Offset = SizeOffset.Offset.getSExtValue();
3782
3783 // Three checks are required to ensure safety:
3784 // . Offset >= 0 (since the offset is given from the base ptr)
3785 // . Size >= Offset (unsigned)
3786 // . Size - Offset >= NeededSize (unsigned)
3787 return Offset >= 0 && Size >= uint64_t(Offset) &&
3788 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3789}
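// Example: a 4-byte load from byte offset 8 of a known 16-byte object gives
// Size == 16 and Offset == 8; all three checks pass (8 >= 0, 16 >= 8, and
// 16 - 8 >= 4, where 4 == TypeStoreSize / 8), so the access is statically in
// bounds and the caller may skip emitting a shadow check for it.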
@ Poison
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
static const uint64_t kEmscriptenShadowOffset
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
This defines the Use class.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
#define OP(OPC)
Definition: SandboxIR.h:655
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This file contains some functions that are useful when dealing with strings.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1498
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1520
AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
Definition: Instructions.h:61
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:137
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
Definition: Instructions.h:126
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:635
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
Definition: Attributes.h:606
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:292
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
Definition: InstrTypes.h:2008
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition: Comdat.h:38
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition: Comdat.h:40
@ Any
The linker may choose any COMDAT.
Definition: Comdat.h:36
@ NoDeduplicate
No deduplication is performed.
Definition: Comdat.h:39
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition: Comdat.h:37
ConstantArray - Constant Array Declarations.
Definition: Constants.h:424
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1292
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2281
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2227
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1253
static bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
Definition: Constants.cpp:1575
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1800
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1357
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:432
Debug location.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition: Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition: Function.cpp:401
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:903
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1993
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:550
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
Definition: Globals.cpp:137
void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
Definition: Metadata.cpp:1762
void setComdat(Comdat *C)
Definition: Globals.cpp:206
void setSection(StringRef S)
Change the section for this global.
Definition: Globals.cpp:267
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:248
void setUnnamedAddr(UnnamedAddr Val)
Definition: GlobalValue.h:231
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:567
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:271
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:254
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ CommonLinkage
Tentative definitions.
Definition: GlobalValue.h:62
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition: GlobalValue.h:53
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition: GlobalValue.h:61
DLLStorageClassTypes getDLLStorageClass() const
Definition: GlobalValue.h:275
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition: Globals.cpp:514
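These GlobalValue/GlobalVariable accessors support the "replace a global with an instrumented twin" pattern; a hedged sketch, where OldGV and NewGV are assumed to be built by the caller and the 32-byte alignment is purely illustrative:

static void swapInReplacement(GlobalVariable *OldGV, GlobalVariable *NewGV) {
  NewGV->copyAttributesFrom(OldGV);  // linkage, visibility, section, ...
  NewGV->setAlignment(Align(32));    // illustrative redzone alignment
  NewGV->takeName(OldGV);            // keep the original symbol name
  OldGV->replaceAllUsesWith(NewGV);  // retarget all users
  OldGV->eraseFromParent();
}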
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1790
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:508
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2480
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1824
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2190
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2289
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1091
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:105
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1454
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1996
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:528
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2265
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1883
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:483
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2417
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1766
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2261
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition: IRBuilder.h:494
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition: IRBuilder.h:1807
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1492
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1820
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2137
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2569
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1514
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2216
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:561
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1843
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Definition: IRBuilder.h:656
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2152
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1378
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2686
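Most of the IRBuilder helpers above combine in the classic shadow computation Shadow = (Addr >> Scale) + Offset. A simplified sketch (not the pass's exact code), assuming a 64-bit target; Scale and Offset stand in for the target's mapping parameters:

static Value *emitShadowIsPoisoned(IRBuilder<> &IRB, Value *Addr,
                                   uint64_t Scale, uint64_t Offset) {
  Type *IntptrTy = IRB.getInt64Ty();
  Value *AddrInt = IRB.CreatePtrToInt(Addr, IntptrTy);
  Value *Shifted = IRB.CreateLShr(AddrInt, ConstantInt::get(IntptrTy, Scale));
  Value *ShadowInt = IRB.CreateAdd(Shifted, ConstantInt::get(IntptrTy, Offset));
  Value *ShadowPtr = IRB.CreateIntToPtr(ShadowInt, IRB.getPtrTy());
  Value *ShadowByte = IRB.CreateLoad(IRB.getInt8Ty(), ShadowPtr, "shadow");
  // A nonzero shadow byte means the access may touch poisoned memory.
  return IRB.CreateICmpNE(ShadowByte, ConstantInt::get(IRB.getInt8Ty(), 0));
}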
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
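A sketch of the InlineAsm::get pattern, here building an empty side-effecting asm statement (a compiler-level barrier); purely illustrative:

static CallInst *emitAsmBarrier(IRBuilder<> &IRB) {
  FunctionType *FTy = FunctionType::get(IRB.getVoidTy(), /*isVarArg=*/false);
  InlineAsm *IA = InlineAsm::get(FTy, /*AsmString=*/"", /*Constraints=*/"",
                                 /*hasSideEffects=*/true);
  return IRB.CreateCall(FTy, IA);
}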
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR unit.
Definition: PassManager.h:563
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
Definition: InstVisitor.h:244
RetTy visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitReturnInst(ReturnInst &I)
Definition: InstVisitor.h:226
RetTy visitAllocaInst(AllocaInst &I)
Definition: InstVisitor.h:168
RetTy visitResumeInst(ResumeInst &I)
Definition: InstVisitor.h:238
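A sketch of the InstVisitor CRTP pattern these hooks belong to; the collector below is illustrative, not the pass's actual visitor:

struct ExampleCollector : public InstVisitor<ExampleCollector> {
  SmallVector<AllocaInst *, 8> Allocas;
  SmallVector<ReturnInst *, 4> Rets;
  void visitAllocaInst(AllocaInst &AI) { Allocas.push_back(&AI); }
  void visitReturnInst(ReturnInst &RI) { Rets.push_back(&RI); }
};
// Usage: ExampleCollector C; C.visit(F); // visits every instruction in F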
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not belong to a module.
Definition: Instruction.cpp:66
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:363
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:824
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this', or nullptr if no such instruction exists.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
An instruction for reading from memory.
Definition: Instructions.h:174
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:47
Metadata node.
Definition: Metadata.h:1069
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1542
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Evaluate the size and offset of an object pointed to by a Value* statically.
SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1189
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
Definition: DerivedTypes.h:662
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:435
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:502
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void reserve(size_type N)
Definition: SmallVector.h:676
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:361
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vector, but is general enough to go beyond that when required.
Definition: TinyPtrVector.h:29
EltTy front() const
bool empty() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:771
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition: Triple.h:852
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition: Triple.h:553
bool isOSNetBSD() const
Definition: Triple.h:576
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:769
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition: Triple.h:943
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition: Triple.h:932
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:390
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition: Triple.h:938
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
@ DXContainer
Definition: Triple.h:301
@ UnknownObjectFormat
Definition: Triple.h:298
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition: Triple.h:857
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:678
bool isAMDGPU() const
Definition: Triple.h:847
bool isMacOSX() const
Is this a Mac OS X triple.
Definition: Triple.h:522
bool isOSFreeBSD() const
Definition: Triple.h:584
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:698
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition: Triple.h:541
bool isiOS() const
Is this an iOS triple.
Definition: Triple.h:531
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition: Triple.h:766
bool isOSFuchsia() const
Definition: Triple.h:588
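A sketch of the target gating these Triple predicates enable; the API-level bound below is illustrative, not a real pass threshold:

static bool wantsOldAndroidWorkaround(const Triple &TT) {
  // Only fire on Android releases older than an (illustrative) version.
  return TT.isAndroid() && TT.isAndroidVersionLT(21);
}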
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:298
static IntegerType * getInt32Ty(LLVMContext &C)
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:501
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Definition: Type.cpp:664
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given the SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
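A usage sketch for the two helpers above, assuming the common mapping scale of 3 (8-byte shadow granules); the 128-byte size is illustrative:

static void collectInterestingOperands(Module &M, Instruction *I) {
  SmallVector<InterestingMemoryOperand, 1> Interesting;
  getInterestingMemoryOperands(M, I, Interesting);
  // Each entry names one address operand plus its access kind and size.
  // A redzone for a 128-byte global at scale 3 would be:
  uint64_t RZ = getRedzoneSizeForGlobal(/*AsanScale=*/3, /*SizeInBytes=*/128);
  (void)RZ;
}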
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition: MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
Definition: X86BaseInfo.h:732
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
The llvm namespace: the enclosing namespace for all of the LLVM APIs referenced above.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
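A hedged sketch of how findAllocaForValue combines with the stack-safety analysis above to skip provably safe accesses; SSI may be null when the analysis was not requested:

static bool canSkipInstrumentation(const StackSafetyGlobalInfo *SSI,
                                   Value *Addr) {
  if (AllocaInst *AI = findAllocaForValue(Addr, /*OffsetZero=*/true))
    return SSI && SSI->isSafe(*AI); // the whole alloca is provably safe
  return false;
}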
@ Done
Definition: Threading.h:61
Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan's detection of stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return).
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value, returning the original object being addressed.
Op::Description Desc
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
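The personality helpers compose in one common query; this mirrors the usual pattern for deciding whether a function uses funclet-scoped EH:

static bool usesScopedEH(const Function &F) {
  return F.hasPersonalityFn() &&
         isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn()));
}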
SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
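A sketch wiring this helper to appendToGlobalCtors (documented below); the ctor/init names and the priority are illustrative:

static void registerModuleCtor(Module &M) {
  auto [Ctor, InitFn] = createSanitizerCtorAndInitFunctions(
      M, /*CtorName=*/"hypothetical.module_ctor",
      /*InitName=*/"__hypothetical_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{});
  (void)InitFn;
  appendToGlobalCtors(M, Ctor, /*Priority=*/1);
}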
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, Instruction *InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
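A usage sketch for the frame-layout helpers; granularity 8 and a 64-byte minimum header are typical 64-bit values, used here illustratively:

static void describeFrame(SmallVectorImpl<ASanStackVariableDescription> &Vars) {
  ASanStackFrameLayout Layout = ComputeASanStackFrameLayout(
      Vars, /*Granularity=*/8, /*MinHeaderSize=*/64);
  SmallVector<uint8_t, 64> Shadow = GetShadowBytes(Vars, Layout);
  SmallString<64> Desc = ComputeASanStackFrameDescription(Vars);
  (void)Shadow; (void)Desc; // poison bytes + human-readable frame description
}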
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
@ Dynamic
Denotes mode unknown at compile time.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions are moved to a new block.
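A sketch of the standard "branch to a cold check" shape built from this helper plus createUnlikelyBranchWeights (above); Cond is assumed to be an i1 computed before InsertPt, and ReportFn is an illustrative callee:

static void emitColdPath(Instruction *InsertPt, Value *Cond,
                         FunctionCallee ReportFn, Value *Addr) {
  MDBuilder MDB(InsertPt->getContext());
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cond, InsertPt->getIterator(), /*Unreachable=*/false,
      MDB.createUnlikelyBranchWeights());
  IRBuilder<> IRB(ThenTerm); // fill the unlikely "then" block
  IRB.CreateCall(ReportFn, {Addr});
}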
AsanCtorKind
Types of ASan module constructors supported.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition: Local.cpp:4103
void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
Definition: ModuleUtils.cpp:78
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
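A usage sketch: query the mapping parameters for a target, where LongSize is the pointer width in bits; the returned base is what gets added (or OR'ed) after shifting an address right by the scale:

static uint64_t shadowBaseFor(const Triple &TT) {
  uint64_t ShadowBase;
  int MappingScale;
  bool OrShadowOffset;
  getAddressSanitizerParams(TT, /*LongSize=*/64, /*IsKasan=*/false,
                            &ShadowBase, &MappingScale, &OrShadowOffset);
  return ShadowBase;
}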
std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition: Demangle.cpp:20
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:2132
#define N
ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
Various options to control the behavior of getObjectSize.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.
bool bothKnown() const