//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
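// For example, a 4-byte userspace write packs as
// (1 << kIsWriteShift) | (2 << kAccessSizeIndexShift) = 0x24:
// bit 0 holds CompileKernel, bits 1-4 the log2(size) index, bit 5 IsWrite.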

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::Hidden, cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"), cl::Hidden,
                                        cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

308 "asan-detect-invalid-pointer-pair",
309 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
310 cl::init(false));
311
313 "asan-detect-invalid-pointer-cmp",
314 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
315 cl::init(false));
316
318 "asan-detect-invalid-pointer-sub",
319 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
320 cl::init(false));
321
323 "asan-realign-stack",
324 cl::desc("Realign stack to the value of this flag (power of two)"),
325 cl::Hidden, cl::init(32));
326
328 "asan-instrumentation-with-call-threshold",
329 cl::desc("If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
332 cl::Hidden, cl::init(7000));
333
335 "asan-memory-access-callback-prefix",
336 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
337 cl::init("__asan_"));
338
340 "asan-kernel-mem-intrinsic-prefix",
341 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
342 cl::init(false));
343
344static cl::opt<bool>
345 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
346 cl::desc("instrument dynamic allocas"),
347 cl::Hidden, cl::init(true));
348
350 "asan-skip-promotable-allocas",
351 cl::desc("Do not instrument promotable allocas"), cl::Hidden,
352 cl::init(true));
353
355 "asan-constructor-kind",
356 cl::desc("Sets the ASan constructor kind"),
357 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
358 clEnumValN(AsanCtorKind::Global, "global",
359 "Use global constructors")),
360 cl::init(AsanCtorKind::Global), cl::Hidden);
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset
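// For example, with the default Scale of 3 and the typical Linux x86-64
// Offset of 0x7fff8000, the 8 application bytes at 0x10000 are described by
// the shadow byte at (0x10000 >> 3) + 0x7fff8000 = 0x7fffa000.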

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.getEnvironment() == Triple::GNUABIN32;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsEmscripten = TargetTriple.isOSEmscripten();
  bool IsAMDGPU = TargetTriple.isAMDGPU();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) when the
  // offset is a power of two, but on ppc64 and loongarch64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}

namespace llvm {
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

ASanAccessInfo::ASanAccessInfo(int32_t Packed)
    : Packed(Packed),
      AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
      IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
      CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}

ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               uint8_t AccessSizeIndex)
    : Packed((IsWrite << kIsWriteShift) +
             (CompileKernel << kCompileKernelShift) +
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}

} // namespace llvm

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
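// E.g. the default scale of 3 (8-byte shadow granules) keeps the 32-byte
// minimum, while scale 6 yields 64-byte and scale 7 yields 128-byte redzones.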

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      Instruction *EHPad = Color->getFirstNonPHI();

      if (EHPad && EHPad->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle
        OperandBundleDef OB("funclet", EHPad);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M, const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  void instrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor(Module &M);

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion(const Module &M) const;

  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack &&
                    !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // should replace this call with another one with changed parameters and
  // replace all its uses with the new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also left, partial and right redzones.
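  // For instance (a sketch, assuming kAllocaRzSize == 32): a dynamic
  //   %a = alloca i8, i64 100, align 8
  // conceptually becomes
  //   %tmp = alloca i8, i64 <100 plus redzones, rounded up>, align 32
  //   %a   = %tmp + 32   ; skip the 32-byte left redzone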
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    auto *Size = cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    // Currently we can only handle lifetime markers pointing to the
    // beginning of the alloca.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel";
  OS << '>';
}
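// E.g. with Options.CompileKernel set this renders as "asan<kernel>" in
// -print-pipeline-passes output, and as "asan<>" otherwise.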

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
  }
  Modified |= ModuleSanitizer.instrumentModule(M);
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
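// E.g. a 32-bit (4-byte) access maps to index countr_zero(4) == 2, and a
// 128-bit access to index 4, the last of the kNumberOfAccessSizes slots.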

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
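// Note: when the offset is a power of two, OR and ADD agree because the
// shifted address never sets the offset bit; e.g. with Offset == 1ULL << 44
// and Scale == 3, Addr >> 3 stays below 2^44 for any Addr below 2^47, so the
// cheaper OR form is used (see OrShadowOffset above).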

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       !isAllocaPromotable(&AI) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}
1533
1534static bool isPointerOperand(Value *V) {
1535 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1536}
1537
1538// This is a rough heuristic; it may cause both false positives and
1539// false negatives. The proper implementation requires cooperation with
1540// the frontend.
1542 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1543 if (!Cmp->isRelational())
1544 return false;
1545 } else {
1546 return false;
1547 }
1548 return isPointerOperand(I->getOperand(0)) &&
1549 isPointerOperand(I->getOperand(1));
1550}
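// For example, "icmp ult ptr %p, %q" is interesting (relational predicate,
// both operands pointer-typed), while "icmp eq ptr %p, null" is skipped
// because equality predicates fail isRelational().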
1551
1552// This is a rough heuristic; it may cause both false positives and
1553// false negatives. The proper implementation requires cooperation with
1554// the frontend.
1555static bool isInterestingPointerSubtraction(Instruction *I) {
1556 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1557 if (BO->getOpcode() != Instruction::Sub)
1558 return false;
1559 } else {
1560 return false;
1561 }
1562 return isPointerOperand(I->getOperand(0)) &&
1563 isPointerOperand(I->getOperand(1));
1564}
1565
1566bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1567 // If a global variable does not have dynamic initialization we don't
1568 // have to instrument it. However, if a global does not have an initializer
1569 // at all, we assume it has a dynamic initializer (in another TU).
1570 if (!G->hasInitializer())
1571 return false;
1572
1573 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1574 return false;
1575
1576 return true;
1577}
1578
1579void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1580 Instruction *I, RuntimeCallInserter &RTCI) {
1581 IRBuilder<> IRB(I);
1582 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1583 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1584 for (Value *&i : Param) {
1585 if (i->getType()->isPointerTy())
1586 i = IRB.CreatePointerCast(i, IntptrTy);
1587 }
1588 RTCI.createRuntimeCall(IRB, F, Param);
1589}
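// A sketch of what this emits for "%c = icmp ult ptr %p, %q" (assuming the
// default runtime hook names bound to AsanPtrCmpFunction/AsanPtrSubFunction):
//   %p.i = ptrtoint ptr %p to i64
//   %q.i = ptrtoint ptr %q to i64
//   call void @__sanitizer_ptr_cmp(i64 %p.i, i64 %q.i)
// so the runtime can flag comparisons of pointers into different allocations.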
1590
1591static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1592 Instruction *InsertBefore, Value *Addr,
1593 MaybeAlign Alignment, unsigned Granularity,
1594 TypeSize TypeStoreSize, bool IsWrite,
1595 Value *SizeArgument, bool UseCalls,
1596 uint32_t Exp, RuntimeCallInserter &RTCI) {
1597 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1598 // if the data is properly aligned.
1599 if (!TypeStoreSize.isScalable()) {
1600 const auto FixedSize = TypeStoreSize.getFixedValue();
1601 switch (FixedSize) {
1602 case 8:
1603 case 16:
1604 case 32:
1605 case 64:
1606 case 128:
1607 if (!Alignment || *Alignment >= Granularity ||
1608 *Alignment >= FixedSize / 8)
1609 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1610 FixedSize, IsWrite, nullptr, UseCalls,
1611 Exp, RTCI);
1612 }
1613 }
1614 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1615 IsWrite, nullptr, UseCalls, Exp, RTCI);
1616}
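// Example of the dispatch above: with Granularity = 8, a 4-byte load from a
// pointer known to be 4-aligned satisfies "*Alignment >= FixedSize / 8" and
// gets a single shadow check; a 16-byte access that is only 1-aligned matches
// neither condition and is routed to instrumentUnusualSizeOrAlignment.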
1617
1618void AddressSanitizer::instrumentMaskedLoadOrStore(
1619 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1620 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1621 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1622 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1623 RuntimeCallInserter &RTCI) {
1624 auto *VTy = cast<VectorType>(OpType);
1625 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1626 auto Zero = ConstantInt::get(IntptrTy, 0);
1627
1628 IRBuilder IB(I);
1629 Instruction *LoopInsertBefore = I;
1630 if (EVL) {
1631 // The end argument of SplitBlockAndInsertForLane is assumed to be greater
1632 // than zero, so we should check whether EVL is zero here.
1633 Type *EVLType = EVL->getType();
1634 Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1635 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
1636 IB.SetInsertPoint(LoopInsertBefore);
1637 // Cast EVL to IntptrTy.
1638 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1639 // To avoid undefined behavior when extracting with an out-of-range index,
1640 // use the minimum of EVL and the element count as the trip count.
1641 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1642 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1643 } else {
1644 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1645 }
1646
1647 // Cast Stride to IntptrTy.
1648 if (Stride)
1649 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1650
1651 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore,
1652 [&](IRBuilderBase &IRB, Value *Index) {
1653 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1654 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1655 if (MaskElemC->isZero())
1656 // No check
1657 return;
1658 // Unconditional check
1659 } else {
1660 // Conditional check
1661 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1662 MaskElem, &*IRB.GetInsertPoint(), false);
1663 IRB.SetInsertPoint(ThenTerm);
1664 }
1665
1666 Value *InstrumentedAddress;
1667 if (isa<VectorType>(Addr->getType())) {
1668 assert(
1669 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1670 "Expected vector of pointer.");
1671 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1672 } else if (Stride) {
1673 Index = IRB.CreateMul(Index, Stride);
1674 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1675 } else {
1676 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1677 }
1678 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1679 Alignment, Granularity, ElemTypeSize, IsWrite,
1680 SizeArgument, UseCalls, Exp, RTCI);
1681 });
1682}
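// The overall structure emitted above is, conceptually (pseudocode sketch for
// the EVL case):
//   if (EVL != 0) {
//     TC = umin(zext(EVL), ElementCount);
//     for (Index = 0; Index != TC; ++Index)
//       if (Mask[Index])
//         check(address of lane Index);  // vector-of-pointers, strided, or
//   }                                    // consecutive, as selected above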
1683
1684void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1685 InterestingMemoryOperand &O, bool UseCalls,
1686 const DataLayout &DL,
1687 RuntimeCallInserter &RTCI) {
1688 Value *Addr = O.getPtr();
1689
1690 // Optimization experiments.
1691 // The experiments can be used to evaluate potential optimizations that remove
1692 // instrumentation (assess false negatives). Instead of completely removing
1693 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1694 // experiments that want to remove instrumentation of this instruction).
1695 // If Exp is non-zero, this pass will emit special calls into runtime
1696 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1697 // make the runtime terminate the program in a special way (with a different
1698 // exit status). Then you run the new compiler on a buggy corpus, collect
1699 // the special terminations (ideally, you don't see them at all -- no false
1700 // negatives) and make the decision on the optimization.
1701 uint32_t Exp = ClForceExperiment;
1702
1703 if (ClOpt && ClOptGlobals) {
1704 // If initialization order checking is disabled, a simple access to a
1705 // dynamically initialized global is always valid.
1706 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1707 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1708 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1709 NumOptimizedAccessesToGlobalVar++;
1710 return;
1711 }
1712 }
1713
1714 if (ClOpt && ClOptStack) {
1715 // A direct inbounds access to a stack variable is always valid.
1716 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1717 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1718 NumOptimizedAccessesToStackVar++;
1719 return;
1720 }
1721 }
1722
1723 if (O.IsWrite)
1724 NumInstrumentedWrites++;
1725 else
1726 NumInstrumentedReads++;
1727
1728 unsigned Granularity = 1 << Mapping.Scale;
1729 if (O.MaybeMask) {
1730 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1731 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1732 Granularity, O.OpType, O.IsWrite, nullptr,
1733 UseCalls, Exp, RTCI);
1734 } else {
1735 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1736 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1737 UseCalls, Exp, RTCI);
1738 }
1739}
1740
1741Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1742 Value *Addr, bool IsWrite,
1743 size_t AccessSizeIndex,
1744 Value *SizeArgument,
1745 uint32_t Exp,
1746 RuntimeCallInserter &RTCI) {
1747 InstrumentationIRBuilder IRB(InsertBefore);
1748 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1749 CallInst *Call = nullptr;
1750 if (SizeArgument) {
1751 if (Exp == 0)
1752 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1753 {Addr, SizeArgument});
1754 else
1755 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1756 {Addr, SizeArgument, ExpVal});
1757 } else {
1758 if (Exp == 0)
1759 Call = RTCI.createRuntimeCall(
1760 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1761 else
1762 Call = RTCI.createRuntimeCall(
1763 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1764 }
1765
1766 Call->setCannotMerge();
1767 return Call;
1768}
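// Example of the callback selection above: IsWrite = true,
// AccessSizeIndex = 2 (a 4-byte access) and Exp = 0 produce a call to
// __asan_report_store4(addr); with a SizeArgument present it would instead
// be __asan_report_store_n(addr, size).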
1769
1770Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1771 Value *ShadowValue,
1772 uint32_t TypeStoreSize) {
1773 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1774 // Addr & (Granularity - 1)
1775 Value *LastAccessedByte =
1776 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1777 // (Addr & (Granularity - 1)) + size - 1
1778 if (TypeStoreSize / 8 > 1)
1779 LastAccessedByte = IRB.CreateAdd(
1780 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1781 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1782 LastAccessedByte =
1783 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1784 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1785 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1786}
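// Worked example with Scale = 3 (Granularity = 8): for a 4-byte access at an
// address with (Addr & 7) == 2, LastAccessedByte = 2 + 4 - 1 = 5. A shadow
// value k in 1..7 means "only the first k bytes of this granule are
// addressable", so 5 >= k (e.g. k == 4) flags a partial-redzone violation,
// while k == 6 lets the access through.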
1787
1788Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1789 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1790 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1791 // Do not instrument unsupported addrspaces.
1792 if (isUnsupportedAMDGPUAddrspace(Addr))
1793 return nullptr;
1794 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1795 // Follow host instrumentation for global and constant addresses.
1796 if (PtrTy->getPointerAddressSpace() != 0)
1797 return InsertBefore;
1798 // Instrument generic addresses in supported address spaces.
1799 IRBuilder<> IRB(InsertBefore);
1800 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1801 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1802 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1803 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1804 Value *AddrSpaceZeroLanding =
1805 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1806 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1807 return InsertBefore;
1808}
1809
1810Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1811 Value *Cond, bool Recover) {
1812 Module &M = *IRB.GetInsertBlock()->getModule();
1813 Value *ReportCond = Cond;
1814 if (!Recover) {
1815 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1816 IRB.getInt1Ty());
1817 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1818 }
1819
1820 auto *Trm =
1821 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1822 MDBuilder(*C).createUnlikelyBranchWeights());
1823 Trm->getParent()->setName("asan.report");
1824
1825 if (Recover)
1826 return Trm;
1827
1828 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1829 IRB.SetInsertPoint(Trm);
1830 return IRB.CreateCall(
1831 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1832}
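// A sketch of the non-Recover shape (assuming kAMDGPUBallotName and
// kAMDGPUUnreachableName resolve to the amdgcn ballot/unreachable intrinsics):
//   %any = call i64 ballot(i1 %cond)          ; wavefront-uniform reduction
//   br i1 (%any != 0), label %asan.report, label %cont
//   asan.report:                              ; entered if any lane faulted
//     if (%cond) { <report for this lane>; unreachable }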
1833
1834void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1835 Instruction *InsertBefore, Value *Addr,
1836 MaybeAlign Alignment,
1837 uint32_t TypeStoreSize, bool IsWrite,
1838 Value *SizeArgument, bool UseCalls,
1839 uint32_t Exp,
1840 RuntimeCallInserter &RTCI) {
1841 if (TargetTriple.isAMDGPU()) {
1842 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1843 TypeStoreSize, IsWrite, SizeArgument);
1844 if (!InsertBefore)
1845 return;
1846 }
1847
1848 InstrumentationIRBuilder IRB(InsertBefore);
1849 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1850 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1851
1852 if (UseCalls && ClOptimizeCallbacks) {
1853 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1854 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1855 IRB.CreateCall(
1856 Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
1857 {IRB.CreatePointerCast(Addr, PtrTy),
1858 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1859 return;
1860 }
1861
1862 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1863 if (UseCalls) {
1864 if (Exp == 0)
1865 RTCI.createRuntimeCall(
1866 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1867 else
1868 RTCI.createRuntimeCall(
1869 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1870 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1871 return;
1872 }
1873
1874 Type *ShadowTy =
1875 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1876 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
1877 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1878 const uint64_t ShadowAlign =
1879 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1880 Value *ShadowValue = IRB.CreateAlignedLoad(
1881 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1882
1883 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1884 size_t Granularity = 1ULL << Mapping.Scale;
1885 Instruction *CrashTerm = nullptr;
1886
1887 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1888
1889 if (TargetTriple.isAMDGCN()) {
1890 if (GenSlowPath) {
1891 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1892 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1893 }
1894 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1895 } else if (GenSlowPath) {
1896 // We use branch weights for the slow path check, to indicate that the slow
1897 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1898 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1899 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1900 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1901 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1902 IRB.SetInsertPoint(CheckTerm);
1903 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1904 if (Recover) {
1905 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1906 } else {
1907 BasicBlock *CrashBlock =
1908 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1909 CrashTerm = new UnreachableInst(*C, CrashBlock);
1910 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1911 ReplaceInstWithInst(CheckTerm, NewTerm);
1912 }
1913 } else {
1914 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1915 }
1916
1917 Instruction *Crash = generateCrashCode(
1918 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1919 if (OrigIns->getDebugLoc())
1920 Crash->setDebugLoc(OrigIns->getDebugLoc());
1921}
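// Putting it together for an 8-byte load with the default x86-64 userspace
// mapping (Offset = 0x7fff8000, Scale = 3) -- a sketch, not exact output:
//   %a     = ptrtoint ptr %p to i64
//   %s.ptr = inttoptr i64 ((%a >> 3) + 0x7fff8000) to ptr
//   %s     = load i8, ptr %s.ptr
//   br i1 (%s != 0), label %crash, label %cont
// No slow path is generated here, since TypeStoreSize == 8 * Granularity.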
1922
1923// Instrument unusual size or unusual alignment.
1924 // We cannot do it with a single check, so we do a 1-byte check for the first
1925// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1926// to report the actual access size.
1927void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1928 Instruction *I, Instruction *InsertBefore, Value *Addr,
1929 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
1930 uint32_t Exp, RuntimeCallInserter &RTCI) {
1931 InstrumentationIRBuilder IRB(InsertBefore);
1932 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
1933 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
1934
1935 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1936 if (UseCalls) {
1937 if (Exp == 0)
1938 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1939 {AddrLong, Size});
1940 else
1941 RTCI.createRuntimeCall(
1942 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
1943 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1944 } else {
1945 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
1946 Value *LastByte = IRB.CreateIntToPtr(
1947 IRB.CreateAdd(AddrLong, SizeMinusOne),
1948 Addr->getType());
1949 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
1950 RTCI);
1951 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
1952 Exp, RTCI);
1953 }
1954}
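// E.g. a 10-byte access becomes two 1-byte shadow checks, at Addr and at
// Addr + 9, with the real size forwarded as SizeArgument so the report
// callback can print the actual access size.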
1955
1956void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
1957 GlobalValue *ModuleName) {
1958 // Set up the arguments to our poison/unpoison functions.
1959 IRBuilder<> IRB(&GlobalInit.front(),
1960 GlobalInit.front().getFirstInsertionPt());
1961
1962 // Add a call to poison all external globals before the given function starts.
1963 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1964 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1965
1966 // Add calls to unpoison all globals before each return instruction.
1967 for (auto &BB : GlobalInit)
1968 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1969 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
1970}
1971
1972void ModuleAddressSanitizer::createInitializerPoisonCalls(
1973 Module &M, GlobalValue *ModuleName) {
1974 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1975 if (!GV)
1976 return;
1977
1978 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1979 if (!CA)
1980 return;
1981
1982 for (Use &OP : CA->operands()) {
1983 if (isa<ConstantAggregateZero>(OP)) continue;
1984 ConstantStruct *CS = cast<ConstantStruct>(OP);
1985
1986 // Must have a function or null ptr.
1987 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
1988 if (F->getName() == kAsanModuleCtorName) continue;
1989 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
1990 // Don't instrument CTORs that will run before asan.module_ctor.
1991 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
1992 continue;
1993 poisonOneInitializer(*F, ModuleName);
1994 }
1995 }
1996}
1997
1998const GlobalVariable *
1999ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2000 // In case this function should be expanded to include rules that do not just
2001 // apply when CompileKernel is true, either guard all existing rules with an
2002 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2003 // should also apply to user space.
2004 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2005
2006 const Constant *C = GA.getAliasee();
2007
2008 // When compiling the kernel, globals that are aliased by symbols prefixed
2009 // by "__" are special and cannot be padded with a redzone.
2010 if (GA.getName().starts_with("__"))
2011 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2012
2013 return nullptr;
2014}
2015
2016bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2017 Type *Ty = G->getValueType();
2018 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2019
2020 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2021 return false;
2022 if (!Ty->isSized()) return false;
2023 if (!G->hasInitializer()) return false;
2024 // Globals in address space 1 and 4 are supported for AMDGPU.
2025 if (G->getAddressSpace() &&
2026 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2027 return false;
2028 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2029 // Two problems with thread-locals:
2030 // - The address of the main thread's copy can't be computed at link-time.
2031 // - Need to poison all copies, not just the main thread's one.
2032 if (G->isThreadLocal()) return false;
2033 // For now, just ignore this Global if the alignment is large.
2034 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2035
2036 // For non-COFF targets, only instrument globals known to be defined by this
2037 // TU.
2038 // FIXME: We can instrument comdat globals on ELF if we are using the
2039 // GC-friendly metadata scheme.
2040 if (!TargetTriple.isOSBinFormatCOFF()) {
2041 if (!G->hasExactDefinition() || G->hasComdat())
2042 return false;
2043 } else {
2044 // On COFF, don't instrument non-ODR linkages.
2045 if (G->isInterposable())
2046 return false;
2047 // If the global has AvailableExternally linkage, then it is not in this
2048 // module, which means it does not need to be instrumented.
2049 if (G->hasAvailableExternallyLinkage())
2050 return false;
2051 }
2052
2053 // If a comdat is present, it must have a selection kind that implies ODR
2054 // semantics: no duplicates, any, or exact match.
2055 if (Comdat *C = G->getComdat()) {
2056 switch (C->getSelectionKind()) {
2057 case Comdat::Any:
2058 case Comdat::ExactMatch:
2059 case Comdat::NoDeduplicate:
2060 break;
2061 case Comdat::Largest:
2062 case Comdat::SameSize:
2063 return false;
2064 }
2065 }
2066
2067 if (G->hasSection()) {
2068 // The kernel mostly uses explicit sections for special global variables
2069 // that we should not instrument. E.g. the kernel may rely on their layout
2070 // without redzones, or remove them at link time ("discard.*"), etc.
2071 if (CompileKernel)
2072 return false;
2073
2074 StringRef Section = G->getSection();
2075
2076 // Globals from llvm.metadata aren't emitted, do not instrument them.
2077 if (Section == "llvm.metadata") return false;
2078 // Do not instrument globals from special LLVM sections.
2079 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2080 return false;
2081
2082 // Do not instrument function pointers to initialization and termination
2083 // routines: dynamic linker will not properly handle redzones.
2084 if (Section.starts_with(".preinit_array") ||
2085 Section.starts_with(".init_array") ||
2086 Section.starts_with(".fini_array")) {
2087 return false;
2088 }
2089
2090 // Do not instrument user-defined sections (with names resembling
2091 // valid C identifiers)
2092 if (TargetTriple.isOSBinFormatELF()) {
2093 if (llvm::all_of(Section,
2094 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2095 return false;
2096 }
2097
2098 // On COFF, if the section name contains '$', it is highly likely that the
2099 // user is using section sorting to create an array of globals similar to
2100 // the way initialization callbacks are registered in .init_array and
2101 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2102 // to such globals is counterproductive, because the intent is that they
2103 // will form an array, and out-of-bounds accesses are expected.
2104 // See https://github.com/google/sanitizers/issues/305
2105 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2106 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2107 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2108 << *G << "\n");
2109 return false;
2110 }
2111
2112 if (TargetTriple.isOSBinFormatMachO()) {
2113 StringRef ParsedSegment, ParsedSection;
2114 unsigned TAA = 0, StubSize = 0;
2115 bool TAAParsed;
2116 cantFail(MCSectionMachO::ParseSectionSpecifier(
2117 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2118
2119 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2120 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2121 // them.
2122 if (ParsedSegment == "__OBJC" ||
2123 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2124 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2125 return false;
2126 }
2127 // See https://github.com/google/sanitizers/issues/32
2128 // Constant CFString instances are compiled in the following way:
2129 // -- the string buffer is emitted into
2130 // __TEXT,__cstring,cstring_literals
2131 // -- the constant NSConstantString structure referencing that buffer
2132 // is placed into __DATA,__cfstring
2133 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2134 // Moreover, it causes the linker to crash on OS X 10.7
2135 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2136 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2137 return false;
2138 }
2139 // The linker merges the contents of cstring_literals and removes the
2140 // trailing zeroes.
2141 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2142 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2143 return false;
2144 }
2145 }
2146 }
2147
2148 if (CompileKernel) {
2149 // Globals that are prefixed by "__" are special and cannot be padded with a
2150 // redzone.
2151 if (G->getName().starts_with("__"))
2152 return false;
2153 }
2154
2155 return true;
2156}
2157
2158// On Mach-O platforms, we emit global metadata in a separate section of the
2159// binary in order to allow the linker to properly dead strip. This is only
2160// supported on recent versions of ld64.
2161bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2162 if (!TargetTriple.isOSBinFormatMachO())
2163 return false;
2164
2165 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2166 return true;
2167 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2168 return true;
2169 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2170 return true;
2171 if (TargetTriple.isDriverKit())
2172 return true;
2173 if (TargetTriple.isXROS())
2174 return true;
2175
2176 return false;
2177}
2178
2179StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2180 switch (TargetTriple.getObjectFormat()) {
2181 case Triple::COFF: return ".ASAN$GL";
2182 case Triple::ELF: return "asan_globals";
2183 case Triple::MachO: return "__DATA,__asan_globals,regular";
2184 case Triple::Wasm:
2185 case Triple::GOFF:
2186 case Triple::SPIRV:
2187 case Triple::XCOFF:
2188 case Triple::DXContainer:
2189 report_fatal_error(
2190 "ModuleAddressSanitizer not implemented for object file format");
2191 case Triple::UnknownObjectFormat:
2192 break;
2193 }
2194 llvm_unreachable("unsupported object format");
2195}
2196
2197void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
2198 IRBuilder<> IRB(*C);
2199
2200 // Declare our poisoning and unpoisoning functions.
2201 AsanPoisonGlobals =
2202 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2203 AsanUnpoisonGlobals =
2204 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2205
2206 // Declare functions that register/unregister globals.
2207 AsanRegisterGlobals = M.getOrInsertFunction(
2208 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2209 AsanUnregisterGlobals = M.getOrInsertFunction(
2210 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2211
2212 // Declare the functions that find globals in a shared object and then invoke
2213 // the (un)register function on them.
2214 AsanRegisterImageGlobals = M.getOrInsertFunction(
2215 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2216 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2217 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2218
2219 AsanRegisterElfGlobals =
2220 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2221 IntptrTy, IntptrTy, IntptrTy);
2222 AsanUnregisterElfGlobals =
2223 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2224 IntptrTy, IntptrTy, IntptrTy);
2225}
2226
2227// Put the metadata and the instrumented global in the same group. This ensures
2228// that the metadata is discarded if the instrumented global is discarded.
2229void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2230 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2231 Module &M = *G->getParent();
2232 Comdat *C = G->getComdat();
2233 if (!C) {
2234 if (!G->hasName()) {
2235 // If G is unnamed, it must be internal. Give it an artificial name
2236 // so we can put it in a comdat.
2237 assert(G->hasLocalLinkage());
2238 G->setName(Twine(kAsanGenPrefix) + "_anon_global");
2239 }
2240
2241 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2242 std::string Name = std::string(G->getName());
2243 Name += InternalSuffix;
2244 C = M.getOrInsertComdat(Name);
2245 } else {
2246 C = M.getOrInsertComdat(G->getName());
2247 }
2248
2249 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2250 // linkage to internal linkage so that a symbol table entry is emitted. This
2251 // is necessary in order to create the comdat group.
2252 if (TargetTriple.isOSBinFormatCOFF()) {
2253 C->setSelectionKind(Comdat::NoDeduplicate);
2254 if (G->hasPrivateLinkage())
2255 G->setLinkage(GlobalValue::InternalLinkage);
2256 }
2257 G->setComdat(C);
2258 }
2259
2260 assert(G->hasComdat());
2261 Metadata->setComdat(G->getComdat());
2262}
2263
2264// Create a separate metadata global and put it in the appropriate ASan
2265// global registration section.
2266GlobalVariable *
2267ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
2268 StringRef OriginalName) {
2269 auto Linkage = TargetTriple.isOSBinFormatMachO()
2270 ? GlobalVariable::InternalLinkage
2271 : GlobalVariable::PrivateLinkage;
2272 GlobalVariable *Metadata = new GlobalVariable(
2273 M, Initializer->getType(), false, Linkage, Initializer,
2274 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2275 Metadata->setSection(getGlobalMetadataSection());
2276 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2277 // relocation pressure.
2278 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2279 return Metadata;
2280}
2281
2282Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
2283 AsanDtorFunction = Function::createWithDefaultAttr(
2284 FunctionType::get(Type::getVoidTy(*C), false),
2285 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2286 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2287 // Ensure Dtor cannot be discarded, even if in a comdat.
2288 appendToUsed(M, {AsanDtorFunction});
2289 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2290
2291 return ReturnInst::Create(*C, AsanDtorBB);
2292}
2293
2294void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2295 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2296 ArrayRef<Constant *> MetadataInitializers) {
2297 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2298 auto &DL = M.getDataLayout();
2299
2300 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2301 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2302 Constant *Initializer = MetadataInitializers[i];
2303 GlobalVariable *G = ExtendedGlobals[i];
2304 GlobalVariable *Metadata =
2305 CreateMetadataGlobal(M, Initializer, G->getName());
2306 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2307 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2308 MetadataGlobals[i] = Metadata;
2309
2310 // The MSVC linker always inserts padding when linking incrementally. We
2311 // cope with that by aligning each struct to its size, which must be a power
2312 // of two.
2313 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2314 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2315 "global metadata will not be padded appropriately");
2316 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2317
2318 SetComdatForGlobalMetadata(G, Metadata, "");
2319 }
2320
2321 // Update llvm.compiler.used, adding the new metadata globals. This is
2322 // needed so that during LTO these variables stay alive.
2323 if (!MetadataGlobals.empty())
2324 appendToCompilerUsed(M, MetadataGlobals);
2325}
2326
2327void ModuleAddressSanitizer::instrumentGlobalsELF(
2328 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2329 ArrayRef<Constant *> MetadataInitializers,
2330 const std::string &UniqueModuleId) {
2331 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2332
2333 // Putting globals in a comdat changes the semantics and can potentially
2334 // cause false negative ODR violations at link time. If ODR indicators are
2335 // used, we keep the comdat sections, as link-time ODR violations will be
2336 // detected on the ODR indicator symbols.
2337 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2338
2339 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2340 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2341 GlobalVariable *G = ExtendedGlobals[i];
2342 GlobalVariable *Metadata =
2343 CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
2344 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2345 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2346 MetadataGlobals[i] = Metadata;
2347
2348 if (UseComdatForGlobalsGC)
2349 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2350 }
2351
2352 // Update llvm.compiler.used, adding the new metadata globals. This is
2353 // needed so that during LTO these variables stay alive.
2354 if (!MetadataGlobals.empty())
2355 appendToCompilerUsed(M, MetadataGlobals);
2356
2357 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2358 // to look up the loaded image that contains it. Second, we can store in it
2359 // whether registration has already occurred, to prevent duplicate
2360 // registration.
2361 //
2362 // Common linkage ensures that there is only one global per shared library.
2363 GlobalVariable *RegisteredFlag = new GlobalVariable(
2364 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2365 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2366 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2367
2368 // Create start and stop symbols.
2369 GlobalVariable *StartELFMetadata = new GlobalVariable(
2370 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2371 "__start_" + getGlobalMetadataSection());
2372 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2373 GlobalVariable *StopELFMetadata = new GlobalVariable(
2374 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2375 "__stop_" + getGlobalMetadataSection());
2376 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2377
2378 // Create a call to register the globals with the runtime.
2379 if (ConstructorKind == AsanCtorKind::Global)
2380 IRB.CreateCall(AsanRegisterElfGlobals,
2381 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2382 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2383 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2384
2385 // We also need to unregister globals at the end, e.g., when a shared library
2386 // gets closed.
2387 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2388 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2389 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2390 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2391 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2392 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2393 }
2394}
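// This relies on the linker-synthesized __start_<section>/__stop_<section>
// symbols, which exist for any ELF section whose name is a valid C
// identifier -- one reason "asan_globals" is spelled without dots, and why
// shouldInstrumentGlobal skips user sections with identifier-like names.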
2395
2396void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2397 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2398 ArrayRef<Constant *> MetadataInitializers) {
2399 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2400
2401 // On recent Mach-O platforms, use a structure which binds the liveness of
2402 // the global variable to the metadata struct. Keep the list of "Liveness"
2403 // GVs created so they can be added to llvm.compiler.used.
2404 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2405 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2406
2407 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2408 Constant *Initializer = MetadataInitializers[i];
2409 GlobalVariable *G = ExtendedGlobals[i];
2410 GlobalVariable *Metadata =
2411 CreateMetadataGlobal(M, Initializer, G->getName());
2412
2413 // On recent Mach-O platforms, we emit the global metadata in a way that
2414 // allows the linker to properly strip dead globals.
2415 auto LivenessBinder =
2416 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2417 ConstantExpr::getPointerCast(Metadata, IntptrTy));
2418 GlobalVariable *Liveness = new GlobalVariable(
2419 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2420 Twine("__asan_binder_") + G->getName());
2421 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2422 LivenessGlobals[i] = Liveness;
2423 }
2424
2425 // Update llvm.compiler.used, adding the new liveness globals. This is
2426 // needed so that during LTO these variables stay alive. The alternative
2427 // would be to have the linker handle the LTO symbols, but libLTO's
2428 // current API does not expose access to the section for each symbol.
2429 if (!LivenessGlobals.empty())
2430 appendToCompilerUsed(M, LivenessGlobals);
2431
2432 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2433 // to look up the loaded image that contains it. Second, we can store in it
2434 // whether registration has already occurred, to prevent duplicate
2435 // registration.
2436 //
2437 // Common linkage ensures that there is only one global per shared library.
2438 GlobalVariable *RegisteredFlag = new GlobalVariable(
2439 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2440 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2441 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2442
2443 if (ConstructorKind == AsanCtorKind::Global)
2444 IRB.CreateCall(AsanRegisterImageGlobals,
2445 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2446
2447 // We also need to unregister globals at the end, e.g., when a shared library
2448 // gets closed.
2449 if (DestructorKind != AsanDtorKind::None) {
2450 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2451 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2452 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2453 }
2454}
2455
2456void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2457 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2458 ArrayRef<Constant *> MetadataInitializers) {
2459 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2460 unsigned N = ExtendedGlobals.size();
2461 assert(N > 0);
2462
2463 // On platforms that don't have a custom metadata section, we emit an array
2464 // of global metadata structures.
2465 ArrayType *ArrayOfGlobalStructTy =
2466 ArrayType::get(MetadataInitializers[0]->getType(), N);
2467 auto AllGlobals = new GlobalVariable(
2468 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2469 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2470 if (Mapping.Scale > 3)
2471 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2472
2473 if (ConstructorKind == AsanCtorKind::Global)
2474 IRB.CreateCall(AsanRegisterGlobals,
2475 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2476 ConstantInt::get(IntptrTy, N)});
2477
2478 // We also need to unregister globals at the end, e.g., when a shared library
2479 // gets closed.
2480 if (DestructorKind != AsanDtorKind::None) {
2481 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2482 IrbDtor.CreateCall(AsanUnregisterGlobals,
2483 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2484 ConstantInt::get(IntptrTy, N)});
2485 }
2486}
2487
2488// This function replaces all global variables with new variables that have
2489// trailing redzones. It also creates a function that poisons
2490// redzones and inserts this function into llvm.global_ctors.
2491// Sets *CtorComdat to true if the global registration code emitted into the
2492// asan constructor is comdat-compatible.
2493void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, Module &M,
2494 bool *CtorComdat) {
2495 // Build set of globals that are aliased by some GA, where
2496 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2497 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2498 if (CompileKernel) {
2499 for (auto &GA : M.aliases()) {
2500 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2501 AliasedGlobalExclusions.insert(GV);
2502 }
2503 }
2504
2505 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2506 for (auto &G : M.globals()) {
2507 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2508 GlobalsToChange.push_back(&G);
2509 }
2510
2511 size_t n = GlobalsToChange.size();
2512 auto &DL = M.getDataLayout();
2513
2514 // A global is described by a structure
2515 // size_t beg;
2516 // size_t size;
2517 // size_t size_with_redzone;
2518 // const char *name;
2519 // const char *module_name;
2520 // size_t has_dynamic_init;
2521 // size_t padding_for_windows_msvc_incremental_link;
2522 // size_t odr_indicator;
2523 // We initialize an array of such structures and pass it to a run-time call.
2524 StructType *GlobalStructTy =
2525 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2526 IntptrTy, IntptrTy, IntptrTy);
2527 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2528 SmallVector<Constant *, 16> Initializers(n);
2529
2530 bool HasDynamicallyInitializedGlobals = false;
2531
2532 // We shouldn't merge the same module names, as this string serves as a
2533 // unique module ID in the runtime.
2534 GlobalVariable *ModuleName =
2535 n != 0
2536 ? createPrivateGlobalForString(M, M.getModuleIdentifier(),
2537 /*AllowMerging*/ false, kAsanGenPrefix)
2538 : nullptr;
2539
2540 for (size_t i = 0; i < n; i++) {
2541 GlobalVariable *G = GlobalsToChange[i];
2542
2543 GlobalValue::SanitizerMetadata MD;
2544 if (G->hasSanitizerMetadata())
2545 MD = G->getSanitizerMetadata();
2546
2547 // The runtime library tries to demangle symbol names in the descriptor,
2548 // but functionality like __cxa_demangle may be unavailable (e.g. with
2549 // -static-libstdc++). So we demangle the symbol names here.
2550 std::string NameForGlobal = G->getName().str();
2551 GlobalVariable *Name =
2552 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2553 /*AllowMerging*/ true, kAsanGenPrefix);
2554
2555 Type *Ty = G->getValueType();
2556 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2557 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2558 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2559
2560 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2561 Constant *NewInitializer = ConstantStruct::get(
2562 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2563
2564 // Create a new global variable with enough space for a redzone.
2565 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2566 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2567 Linkage = GlobalValue::InternalLinkage;
2568 GlobalVariable *NewGlobal = new GlobalVariable(
2569 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2570 G->getThreadLocalMode(), G->getAddressSpace());
2571 NewGlobal->copyAttributesFrom(G);
2572 NewGlobal->setComdat(G->getComdat());
2573 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2574 // Don't fold globals with redzones. The ODR violation detector and redzone
2575 // poisoning implicitly create a dependence on the global's address, so it
2576 // is no longer valid for it to be marked unnamed_addr.
2577 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2578
2579 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2580 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2581 G->isConstant()) {
2582 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2583 if (Seq && Seq->isCString())
2584 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2585 }
2586
2587 // Transfer the debug info and type metadata. The payload starts at offset
2588 // zero so we can copy the metadata over as is.
2589 NewGlobal->copyMetadata(G, 0);
2590
2591 Value *Indices2[2];
2592 Indices2[0] = IRB.getInt32(0);
2593 Indices2[1] = IRB.getInt32(0);
2594
2595 G->replaceAllUsesWith(
2596 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2597 NewGlobal->takeName(G);
2598 G->eraseFromParent();
2599 NewGlobals[i] = NewGlobal;
2600
2601 Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
2602 GlobalValue *InstrumentedGlobal = NewGlobal;
2603
2604 bool CanUsePrivateAliases =
2605 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2606 TargetTriple.isOSBinFormatWasm();
2607 if (CanUsePrivateAliases && UsePrivateAlias) {
2608 // Create local alias for NewGlobal to avoid crash on ODR between
2609 // instrumented and non-instrumented libraries.
2610 InstrumentedGlobal =
2611 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2612 }
2613
2614 // ODR should not happen for local linkage.
2615 if (NewGlobal->hasLocalLinkage()) {
2616 ODRIndicator =
2617 ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
2618 } else if (UseOdrIndicator) {
2619 // With local aliases, we need to provide another externally visible
2620 // symbol __odr_asan_XXX to detect ODR violation.
2621 auto *ODRIndicatorSym =
2622 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2623 Constant::getNullValue(IRB.getInt8Ty()),
2624 kODRGenPrefix + NameForGlobal, nullptr,
2625 NewGlobal->getThreadLocalMode());
2626
2627 // Set meaningful attributes for indicator symbol.
2628 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2629 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2630 ODRIndicatorSym->setAlignment(Align(1));
2631 ODRIndicator = ODRIndicatorSym;
2632 }
2633
2634 Constant *Initializer = ConstantStruct::get(
2635 GlobalStructTy,
2636 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2637 ConstantInt::get(IntptrTy, SizeInBytes),
2638 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2639 ConstantExpr::getPointerCast(Name, IntptrTy),
2640 ConstantExpr::getPointerCast(ModuleName, IntptrTy),
2641 ConstantInt::get(IntptrTy, MD.IsDynInit),
2642 Constant::getNullValue(IntptrTy),
2643 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2644
2645 if (ClInitializers && MD.IsDynInit)
2646 HasDynamicallyInitializedGlobals = true;
2647
2648 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2649
2650 Initializers[i] = Initializer;
2651 }
2652
2653 // Add instrumented globals to the llvm.compiler.used list to prevent LTO
2654 // from ConstantMerge'ing them.
2655 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2656 for (size_t i = 0; i < n; i++) {
2657 GlobalVariable *G = NewGlobals[i];
2658 if (G->getName().empty()) continue;
2659 GlobalsToAddToUsedList.push_back(G);
2660 }
2661 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2662
2663 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2664 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2665 // linkage unit will only have one module constructor, and (b) the register
2666 // function will be called. The module destructor is not created when n ==
2667 // 0.
2668 *CtorComdat = true;
2669 instrumentGlobalsELF(IRB, M, NewGlobals, Initializers,
2670 getUniqueModuleId(&M));
2671 } else if (n == 0) {
2672 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2673 // all compile units will have identical module constructor/destructor.
2674 *CtorComdat = TargetTriple.isOSBinFormatELF();
2675 } else {
2676 *CtorComdat = false;
2677 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2678 InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
2679 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2680 InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
2681 } else {
2682 InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
2683 }
2684 }
2685
2686 // Create calls for poisoning before initializers run and unpoisoning after.
2687 if (HasDynamicallyInitializedGlobals)
2688 createInitializerPoisonCalls(M, ModuleName);
2689
2690 LLVM_DEBUG(dbgs() << M);
2691}
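// Net effect on a single global, e.g. "int a[3];" with MinRZ = 32 (a sketch):
//   @a = global { [3 x i32], [20 x i8] } zeroinitializer, align 32
// i.e. 12 payload bytes plus a 20-byte right redzone, plus one descriptor
// struct registered with the runtime, and poison/unpoison calls if the
// global is dynamically initialized.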
2692
2693uint64_t
2694ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2695 constexpr uint64_t kMaxRZ = 1 << 18;
2696 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2697
2698 uint64_t RZ = 0;
2699 if (SizeInBytes <= MinRZ / 2) {
2700 // Reduce the redzone size for small objects, e.g. int or char[1]. MinRZ is
2701 // at least 32 bytes; optimize when SizeInBytes is less than or equal to
2702 // half of MinRZ.
2703 RZ = MinRZ - SizeInBytes;
2704 } else {
2705 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2706 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2707
2708 // Round up to multiple of MinRZ.
2709 if (SizeInBytes % MinRZ)
2710 RZ += MinRZ - (SizeInBytes % MinRZ);
2711 }
2712
2713 assert((RZ + SizeInBytes) % MinRZ == 0);
2714
2715 return RZ;
2716}
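// Worked examples with MinRZ = 32: SizeInBytes = 4 gives RZ = 28 (4 + 28 is
// one granule of 32); SizeInBytes = 1024 gives
// RZ = clamp(1024/32/4 * 32, 32, 1 << 18) = 256, and since 1024 is already a
// multiple of 32 no extra rounding is added.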
2717
2718int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
2719 int LongSize = M.getDataLayout().getPointerSizeInBits();
2720 bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2721 int Version = 8;
2722 // 32-bit Android is one version ahead because of the switch to dynamic
2723 // shadow.
2724 Version += (LongSize == 32 && isAndroid);
2725 return Version;
2726}
2727
2728bool ModuleAddressSanitizer::instrumentModule(Module &M) {
2729 initializeCallbacks(M);
2730
2731 // Create a module constructor. A destructor is created lazily because not
2732 // all platforms and not all modules need it.
2733 if (ConstructorKind == AsanCtorKind::Global) {
2734 if (CompileKernel) {
2735 // The kernel always builds with its own runtime, and therefore does not
2736 // need the init and version check calls.
2737 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2738 } else {
2739 std::string AsanVersion = std::to_string(GetAsanVersion(M));
2740 std::string VersionCheckName =
2741 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2742 std::tie(AsanCtorFunction, std::ignore) =
2743 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName,
2744 kAsanInitName, /*InitArgTypes=*/{},
2745 /*InitArgs=*/{}, VersionCheckName);
2746 }
2747 }
2748
2749 bool CtorComdat = true;
2750 if (ClGlobals) {
2751 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2752 if (AsanCtorFunction) {
2753 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2754 instrumentGlobals(IRB, M, &CtorComdat);
2755 } else {
2756 IRBuilder<> IRB(*C);
2757 instrumentGlobals(IRB, M, &CtorComdat);
2758 }
2759 }
2760
2761 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2762
2763 // Put the constructor and destructor in comdat if both
2764 // (1) global instrumentation is not TU-specific
2765 // (2) target is ELF.
2766 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2767 if (AsanCtorFunction) {
2768 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2769 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2770 }
2771 if (AsanDtorFunction) {
2772 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2773 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2774 }
2775 } else {
2776 if (AsanCtorFunction)
2777 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2778 if (AsanDtorFunction)
2779 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2780 }
2781
2782 return true;
2783}
2784
2785void AddressSanitizer::initializeCallbacks(Module &M, const TargetLibraryInfo *TLI) {
2786 IRBuilder<> IRB(*C);
2787 // Create __asan_report* callbacks.
2788 // IsWrite, TypeSize and Exp are encoded in the function name.
2789 for (int Exp = 0; Exp < 2; Exp++) {
2790 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2791 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2792 const std::string ExpStr = Exp ? "exp_" : "";
2793 const std::string EndingStr = Recover ? "_noabort" : "";
2794
2795 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2796 SmallVector<Type *, 2> Args1{1, IntptrTy};
2797 AttributeList AL2;
2798 AttributeList AL1;
2799 if (Exp) {
2800 Type *ExpType = Type::getInt32Ty(*C);
2801 Args2.push_back(ExpType);
2802 Args1.push_back(ExpType);
2803 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2804 AL2 = AL2.addParamAttribute(*C, 2, AK);
2805 AL1 = AL1.addParamAttribute(*C, 1, AK);
2806 }
2807 }
2808 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2809 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2810 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2811
2812 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2813 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2814 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2815
2816 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2817 AccessSizeIndex++) {
2818 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2819 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2820 M.getOrInsertFunction(
2821 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2822 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2823
2824 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2825 M.getOrInsertFunction(
2826 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2827 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2828 }
2829 }
2830 }
2831
2832 const std::string MemIntrinCallbackPrefix =
2833 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2834 ? std::string("")
2835 : ClMemoryAccessCallbackPrefix;
2836 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2837 PtrTy, PtrTy, PtrTy, IntptrTy);
2838 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2839 PtrTy, PtrTy, IntptrTy);
2840 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2841 TLI->getAttrList(C, {1}, /*Signed=*/false),
2842 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2843
2844 AsanHandleNoReturnFunc =
2845 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2846
2847 AsanPtrCmpFunction =
2848 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2849 AsanPtrSubFunction =
2850 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2851 if (Mapping.InGlobal)
2852 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2853 ArrayType::get(IRB.getInt8Ty(), 0));
2854
2855 AMDGPUAddressShared =
2856 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2857 AMDGPUAddressPrivate =
2858 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2859}
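// For reference, with the default prefixes the loops above materialize names
// such as __asan_report_load4, __asan_report_exp_store8 (Exp != 0),
// __asan_report_load_n_noabort (sized + Recover), __asan_load4 and
// __asan_storeN -- the "exp_", "N"/"_n" and "_noabort" pieces correspond to
// ExpStr, the sized variants and EndingStr respectively.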
2860
2861bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2862 // For each NSObject descendant having a +load method, this method is invoked
2863 // by the ObjC runtime before any of the static constructors is called.
2864 // Therefore we need to instrument such methods with a call to __asan_init
2865 // at the beginning in order to initialize our runtime before any access to
2866 // the shadow memory.
2867 // We cannot just ignore these methods, because they may call other
2868 // instrumented functions.
2869 if (F.getName().contains(" load]")) {
2870 FunctionCallee AsanInitFunction =
2871 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2872 IRBuilder<> IRB(&F.front(), F.front().begin());
2873 IRB.CreateCall(AsanInitFunction, {});
2874 return true;
2875 }
2876 return false;
2877}
2878
2879bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2880 // Generate code only when dynamic addressing is needed.
2881 if (Mapping.Offset != kDynamicShadowSentinel)
2882 return false;
2883
2884 IRBuilder<> IRB(&F.front().front());
2885 if (Mapping.InGlobal) {
2886 if (ClWithIfuncSuppressRemat) {
2887 // An empty inline asm with input reg == output reg.
2888 // An opaque pointer-to-int cast, basically.
2889 InlineAsm *Asm = InlineAsm::get(
2890 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2891 StringRef(""), StringRef("=r,0"),
2892 /*hasSideEffects=*/false);
2893 LocalDynamicShadow =
2894 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2895 } else {
2896 LocalDynamicShadow =
2897 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2898 }
2899 } else {
2900 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2901 kAsanShadowMemoryDynamicAddress, IntptrTy);
2902 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2903 }
2904 return true;
2905}
2906
2907void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2908 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2909 // to it as uninteresting. This assumes we haven't started processing allocas
2910 // yet. This check is done up front because iterating the use list in
2911 // isInterestingAlloca would be algorithmically slower.
2912 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2913
2914 // Try to get the declaration of llvm.localescape. If it's not in the module,
2915 // we can exit early.
2916 if (!F.getParent()->getFunction("llvm.localescape")) return;
2917
2918 // Look for a call to llvm.localescape in the entry block. It can't be in
2919 // any other block.
2920 for (Instruction &I : F.getEntryBlock()) {
2921 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2922 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2923 // We found a call. Mark all the allocas passed in as uninteresting.
2924 for (Value *Arg : II->args()) {
2925 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2926 assert(AI && AI->isStaticAlloca() &&
2927 "non-static alloca arg to localescape");
2928 ProcessedAllocas[AI] = false;
2929 }
2930 break;
2931 }
2932 }
2933}
2934
2935bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2936 bool ShouldInstrument =
2937 ClDebugMin < 0 || ClDebugMax < 0 ||
2938 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2939 Instrumented++;
2940 return !ShouldInstrument;
2941}
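// E.g. running with ClDebugMin == ClDebugMax == 5 instruments only the fifth
// candidate site in each function, which makes it practical to bisect a
// miscompile or false positive down to a single check.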
2942
2943bool AddressSanitizer::instrumentFunction(Function &F,
2944 const TargetLibraryInfo *TLI) {
2945 if (F.empty())
2946 return false;
2947 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
2948 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
2949 if (F.getName().starts_with("__asan_")) return false;
2950
2951 bool FunctionModified = false;
2952
2953 // If needed, insert __asan_init before checking for SanitizeAddress attr.
2954 // This function needs to be called even if the function body is not
2955 // instrumented.
2956 if (maybeInsertAsanInitAtFunctionEntry(F))
2957 FunctionModified = true;
2958
2959 // Leave if the function doesn't need instrumentation.
2960 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
2961
2962 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
2963 return FunctionModified;
2964
2965 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
2966
2967 initializeCallbacks(*F.getParent(), TLI);
2968
2969 FunctionStateRAII CleanupObj(this);
2970
2971 RuntimeCallInserter RTCI(F);
2972
2973 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
2974
2975 // We can't instrument allocas used with llvm.localescape. Only static allocas
2976 // can be passed to that intrinsic.
2977 markEscapedLocalAllocas(F);
2978
2979 // We want to instrument every address only once per basic block (unless there
2980 // are calls between uses).
2981 SmallPtrSet<Value *, 16> TempsToInstrument;
2982 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
2983 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
2984 SmallVector<Instruction *, 8> NoReturnCalls;
2985 SmallVector<BasicBlock *, 16> AllBlocks;
2986 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
2987
2988 // Fill the set of memory operations to instrument.
2989 for (auto &BB : F) {
2990 AllBlocks.push_back(&BB);
2991 TempsToInstrument.clear();
2992 int NumInsnsPerBB = 0;
2993 for (auto &Inst : BB) {
2994 if (LooksLikeCodeInBug11395(&Inst)) return false;
2995 // Skip instructions inserted by another instrumentation.
2996 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
2997 continue;
2998 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
2999 getInterestingMemoryOperands(&Inst, InterestingOperands);
3000
3001 if (!InterestingOperands.empty()) {
3002 for (auto &Operand : InterestingOperands) {
3003 if (ClOpt && ClOptSameTemp) {
3004 Value *Ptr = Operand.getPtr();
3005 // If we have a mask, skip instrumentation if we've already
3006 // instrumented the full object. But don't add to TempsToInstrument
3007 // because we might get another load/store with a different mask.
3008 if (Operand.MaybeMask) {
3009 if (TempsToInstrument.count(Ptr))
3010 continue; // We've seen this (whole) temp in the current BB.
3011 } else {
3012 if (!TempsToInstrument.insert(Ptr).second)
3013 continue; // We've seen this temp in the current BB.
3014 }
3015 }
3016 OperandsToInstrument.push_back(Operand);
3017 NumInsnsPerBB++;
3018 }
3019 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3020 isInterestingPointerComparison(&Inst)) ||
3021 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3022 isInterestingPointerSubtraction(&Inst))) {
3023 PointerComparisonsOrSubtracts.push_back(&Inst);
3024 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3025 // ok, take it.
3026 IntrinToInstrument.push_back(MI);
3027 NumInsnsPerBB++;
3028 } else {
3029 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3030 // A call inside BB.
3031 TempsToInstrument.clear();
3032 if (CB->doesNotReturn())
3033 NoReturnCalls.push_back(CB);
3034 }
3035 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3036 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3037 }
3038 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3039 }
3040 }
3041
3042 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3043 OperandsToInstrument.size() + IntrinToInstrument.size() >
3044 (unsigned)InstrumentationWithCallsThreshold);
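// Editor's note: with the default -asan-instrumentation-with-call-threshold
// of 7000, very large functions trade inline shadow checks for out-of-line
// __asan_load*/__asan_store* callbacks to limit code-size growth.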
3045 const DataLayout &DL = F.getDataLayout();
3046 ObjectSizeOpts ObjSizeOpts;
3047 ObjSizeOpts.RoundToAlign = true;
3048 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);
3049
3050 // Instrument.
3051 int NumInstrumented = 0;
3052 for (auto &Operand : OperandsToInstrument) {
3053 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3054 instrumentMop(ObjSizeVis, Operand, UseCalls,
3055 F.getDataLayout(), RTCI);
3056 FunctionModified = true;
3057 }
3058 for (auto *Inst : IntrinToInstrument) {
3059 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3060 instrumentMemIntrinsic(Inst, RTCI);
3061 FunctionModified = true;
3062 }
3063
3064 FunctionStackPoisoner FSP(F, *this, RTCI);
3065 bool ChangedStack = FSP.runOnFunction();
3066
3067 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3068 // See e.g. https://github.com/google/sanitizers/issues/37
3069 for (auto *CI : NoReturnCalls) {
3070 IRBuilder<> IRB(CI);
3071 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3072 }
3073
3074 for (auto *Inst : PointerComparisonsOrSubtracts) {
3075 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3076 FunctionModified = true;
3077 }
3078
3079 if (ChangedStack || !NoReturnCalls.empty())
3080 FunctionModified = true;
3081
3082 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3083 << F << "\n");
3084
3085 return FunctionModified;
3086}
3087
3088 // Workaround for bug 11395: we don't want to instrument the stack in functions
3089 // with large assembly blobs (32-bit only); otherwise reg alloc may crash.
3090 // FIXME: remove once bug 11395 is fixed.
3091bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3092 if (LongSize != 32) return false;
3093 CallInst *CI = dyn_cast<CallInst>(I);
3094 if (!CI || !CI->isInlineAsm()) return false;
3095 if (CI->arg_size() <= 5)
3096 return false;
3097 // We have inline assembly with quite a few arguments.
3098 return true;
3099}
3100
3101void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3102 IRBuilder<> IRB(*C);
3103 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3104 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3105 const char *MallocNameTemplate =
3106 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3107 ? kAsanStackMallocAlwaysNameTemplate
3108 : kAsanStackMallocNameTemplate;
3109 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3110 std::string Suffix = itostr(Index);
3111 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3112 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3113 AsanStackFreeFunc[Index] =
3114 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3115 IRB.getVoidTy(), IntptrTy, IntptrTy);
3116 }
3117 }
3118 if (ASan.UseAfterScope) {
3119 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3120 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3121 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3122 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3123 }
3124
3125 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3126 0xf3, 0xf5, 0xf8}) {
3127 std::ostringstream Name;
3128 Name << kAsanSetShadowPrefix;
3129 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3130 AsanSetShadowFunc[Val] =
3131 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3132 }
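// For example, Val == 0xf1 (the stack left-redzone magic) yields the runtime
// callback name "__asan_set_shadow_f1".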
3133
3134 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3135 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3136 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3137 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3138}
3139
3140void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3141 ArrayRef<uint8_t> ShadowBytes,
3142 size_t Begin, size_t End,
3143 IRBuilder<> &IRB,
3144 Value *ShadowBase) {
3145 if (Begin >= End)
3146 return;
3147
3148 const size_t LargestStoreSizeInBytes =
3149 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3150
3151 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3152
3153 // Poison the given range in shadow using the largest possible store size,
3154 // skipping leading and trailing zeros in ShadowMask. Zeros never change, so
3155 // they need neither poisoning nor unpoisoning. Still, we don't mind if some
3156 // of them end up in the middle of a store.
3157 for (size_t i = Begin; i < End;) {
3158 if (!ShadowMask[i]) {
3159 assert(!ShadowBytes[i]);
3160 ++i;
3161 continue;
3162 }
3163
3164 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3165 // Fit store size into the range.
3166 while (StoreSizeInBytes > End - i)
3167 StoreSizeInBytes /= 2;
3168
3169 // Minimize store size by trimming trailing zeros.
3170 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3171 while (j <= StoreSizeInBytes / 2)
3172 StoreSizeInBytes /= 2;
3173 }
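// Worked example (illustrative): for an 8-byte candidate store whose top five
// mask bytes are zero, j stops at the last nonzero byte and the halving above
// trims StoreSizeInBytes from 8 down to 4.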
3174
3175 uint64_t Val = 0;
3176 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3177 if (IsLittleEndian)
3178 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3179 else
3180 Val = (Val << 8) | ShadowBytes[i + j];
3181 }
3182
3183 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3184 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3185 IRB.CreateAlignedStore(
3186 Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
3187 Align(1));
3188
3189 i += StoreSizeInBytes;
3190 }
3191}
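// Worked example (illustrative): on a little-endian target, ShadowBytes
// {0xf1, 0xf2, 0xf3, 0xf8} starting at offset i become a single unaligned
// 4-byte store of 0xf8f3f2f1 to ShadowBase + i.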
3192
3193void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3194 ArrayRef<uint8_t> ShadowBytes,
3195 IRBuilder<> &IRB, Value *ShadowBase) {
3196 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3197}
3198
3199void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3200 ArrayRef<uint8_t> ShadowBytes,
3201 size_t Begin, size_t End,
3202 IRBuilder<> &IRB, Value *ShadowBase) {
3203 assert(ShadowMask.size() == ShadowBytes.size());
3204 size_t Done = Begin;
3205 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3206 if (!ShadowMask[i]) {
3207 assert(!ShadowBytes[i]);
3208 continue;
3209 }
3210 uint8_t Val = ShadowBytes[i];
3211 if (!AsanSetShadowFunc[Val])
3212 continue;
3213
3214 // Skip over a run of identical shadow values.
3215 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3216 }
3217
3218 if (j - i >= ASan.MaxInlinePoisoningSize) {
3219 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3220 RTCI.createRuntimeCall(
3221 IRB, AsanSetShadowFunc[Val],
3222 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3223 ConstantInt::get(IntptrTy, j - i)});
3224 Done = j;
3225 }
3226 }
3227
3228 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3229}
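// Net effect: a run of at least MaxInlinePoisoningSize identical shadow bytes
// becomes one __asan_set_shadow_XX runtime call, and the stretches between
// such runs are flushed through copyToShadowInline above.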
3230
3231 // Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for each
3232 // power of 2 from kMinStackMallocSize (64B) up to kMaxStackMallocSize (64K).
3233static int StackMallocSizeClass(uint64_t LocalStackSize) {
3234 assert(LocalStackSize <= kMaxStackMallocSize);
3235 uint64_t MaxSize = kMinStackMallocSize;
3236 for (int i = 0;; i++, MaxSize *= 2)
3237 if (LocalStackSize <= MaxSize) return i;
3238 llvm_unreachable("impossible LocalStackSize");
3239}
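// For example, LocalStackSize <= 64 maps to class 0, 65..128 to class 1, and
// the kMaxStackMallocSize of 64K to the largest class, 10.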
3240
3241void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3242 Instruction *CopyInsertPoint = &F.front().front();
3243 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3244 // Insert after the dynamic shadow location is determined
3245 CopyInsertPoint = CopyInsertPoint->getNextNode();
3246 assert(CopyInsertPoint);
3247 }
3248 IRBuilder<> IRB(CopyInsertPoint);
3249 const DataLayout &DL = F.getDataLayout();
3250 for (Argument &Arg : F.args()) {
3251 if (Arg.hasByValAttr()) {
3252 Type *Ty = Arg.getParamByValType();
3253 const Align Alignment =
3254 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3255
3256 AllocaInst *AI = IRB.CreateAlloca(
3257 Ty, nullptr,
3258 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3259 ".byval");
3260 AI->setAlignment(Alignment);
3261 Arg.replaceAllUsesWith(AI);
3262
3263 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3264 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3265 }
3266 }
3267}
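// Editor's sketch of the rewrite (illustrative IR, names assumed): a byval
// argument
//   define void @f(ptr byval(%struct.S) align 8 %s)
// gets a private copy in the instrumented frame,
//   %s.byval = alloca %struct.S, align 8
//   call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s.byval, ptr align 8 %s, i64 <size>, i1 false)
// so redzones can also be placed around the argument's data.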
3268
3269PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3270 Value *ValueIfTrue,
3271 Instruction *ThenTerm,
3272 Value *ValueIfFalse) {
3273 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3274 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3275 PHI->addIncoming(ValueIfFalse, CondBlock);
3276 BasicBlock *ThenBlock = ThenTerm->getParent();
3277 PHI->addIncoming(ValueIfTrue, ThenBlock);
3278 return PHI;
3279}
3280
3281Value *FunctionStackPoisoner::createAllocaForLayout(
3282 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3283 AllocaInst *Alloca;
3284 if (Dynamic) {
3285 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3286 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3287 "MyAlloca");
3288 } else {
3289 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3290 nullptr, "MyAlloca");
3291 assert(Alloca->isStaticAlloca());
3292 }
3293 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3294 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3295 Alloca->setAlignment(Align(FrameAlignment));
3296 return IRB.CreatePointerCast(Alloca, IntptrTy);
3297}
3298
3299void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3300 BasicBlock &FirstBB = *F.begin();
3301 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3302 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3303 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3304 DynamicAllocaLayout->setAlignment(Align(32));
3305}
3306
3307void FunctionStackPoisoner::processDynamicAllocas() {
3308 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3309 assert(DynamicAllocaPoisonCallVec.empty());
3310 return;
3311 }
3312
3313 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3314 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3315 assert(APC.InsBefore);
3316 assert(APC.AI);
3317 assert(ASan.isInterestingAlloca(*APC.AI));
3318 assert(!APC.AI->isStaticAlloca());
3319
3320 IRBuilder<> IRB(APC.InsBefore);
3321 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3322 // Dynamic allocas will be unpoisoned unconditionally below in
3323 // unpoisonDynamicAllocas.
3324 // Flag that we need to unpoison static allocas.
3325 }
3326
3327 // Handle dynamic allocas.
3328 createDynamicAllocasInitStorage();
3329 for (auto &AI : DynamicAllocaVec)
3330 handleDynamicAllocaCall(AI);
3331 unpoisonDynamicAllocas();
3332}
3333
3334/// Collect instructions in the entry block after \p InsBefore which initialize
3335/// permanent storage for a function argument. These instructions must remain in
3336/// the entry block so that uninitialized values do not appear in backtraces. An
3337/// added benefit is that this conserves spill slots. This does not move stores
3338/// before instrumented / "interesting" allocas.
3339 static void findStoresToUninstrumentedArgAllocas(
3340 AddressSanitizer &ASan, Instruction &InsBefore,
3341 SmallVectorImpl<Instruction *> &InitInsts) {
3342 Instruction *Start = InsBefore.getNextNonDebugInstruction();
3343 for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
3344 // Argument initialization looks like:
3345 // 1) store <Argument>, <Alloca> OR
3346 // 2) <CastArgument> = cast <Argument> to ...
3347 // store <CastArgument> to <Alloca>
3348 // Do not consider any other kind of instruction.
3349 //
3350 // Note: This covers all known cases, but may not be exhaustive. An
3351 // alternative to pattern-matching stores is to DFS over all Argument uses:
3352 // this might be more general, but is probably much more complicated.
3353 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3354 continue;
3355 if (auto *Store = dyn_cast<StoreInst>(It)) {
3356 // The store destination must be an alloca that isn't interesting for
3357 // ASan to instrument. These are moved up before InsBefore, and they're
3358 // not interesting because allocas for arguments can be mem2reg'd.
3359 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3360 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3361 continue;
3362
3363 Value *Val = Store->getValueOperand();
3364 bool IsDirectArgInit = isa<Argument>(Val);
3365 bool IsArgInitViaCast =
3366 isa<CastInst>(Val) &&
3367 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3368 // Check that the cast appears directly before the store. Otherwise
3369 // moving the cast before InsBefore may break the IR.
3370 Val == It->getPrevNonDebugInstruction();
3371 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3372 if (!IsArgInit)
3373 continue;
3374
3375 if (IsArgInitViaCast)
3376 InitInsts.push_back(cast<Instruction>(Val));
3377 InitInsts.push_back(Store);
3378 continue;
3379 }
3380
3381 // Do not reorder past unknown instructions: argument initialization should
3382 // only involve casts and stores.
3383 return;
3384 }
3385}
3386
3387void FunctionStackPoisoner::processStaticAllocas() {
3388 if (AllocaVec.empty()) {
3389 assert(StaticAllocaPoisonCallVec.empty());
3390 return;
3391 }
3392
3393 int StackMallocIdx = -1;
3394 DebugLoc EntryDebugLocation;
3395 if (auto SP = F.getSubprogram())
3396 EntryDebugLocation =
3397 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3398
3399 Instruction *InsBefore = AllocaVec[0];
3400 IRBuilder<> IRB(InsBefore);
3401
3402 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3403 // debug info is broken, because only entry-block allocas are treated as
3404 // regular stack slots.
3405 auto InsBeforeB = InsBefore->getParent();
3406 assert(InsBeforeB == &F.getEntryBlock());
3407 for (auto *AI : StaticAllocasToMoveUp)
3408 if (AI->getParent() == InsBeforeB)
3409 AI->moveBefore(InsBefore);
3410
3411 // Move stores of arguments into entry-block allocas as well. This prevents
3412 // extra stack slots from being generated (to house the argument values until
3413 // they can be stored into the allocas). This also prevents uninitialized
3414 // values from being shown in backtraces.
3415 SmallVector<Instruction *, 8> ArgInitInsts;
3416 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3417 for (Instruction *ArgInitInst : ArgInitInsts)
3418 ArgInitInst->moveBefore(InsBefore);
3419
3420 // If we have a call to llvm.localescape, keep it in the entry block.
3421 if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
3422
3423 SmallVector<ASanStackVariableDescription, 16> SVD;
3424 SVD.reserve(AllocaVec.size());
3425 for (AllocaInst *AI : AllocaVec) {
3426 ASanStackVariableDescription D = {AI->getName().data(),
3427 ASan.getAllocaSizeInBytes(*AI),
3428 0,
3429 AI->getAlign().value(),
3430 AI,
3431 0,
3432 0};
3433 SVD.push_back(D);
3434 }
3435
3436 // Minimal header size (left redzone) is 4 pointers,
3437 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3438 uint64_t Granularity = 1ULL << Mapping.Scale;
3439 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3440 const ASanStackFrameLayout &L =
3441 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3442
3443 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3444 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3445 for (auto &Desc : SVD)
3446 AllocaToSVDMap[Desc.AI] = &Desc;
3447
3448 // Update SVD with information from lifetime intrinsics.
3449 for (const auto &APC : StaticAllocaPoisonCallVec) {
3450 assert(APC.InsBefore);
3451 assert(APC.AI);
3452 assert(ASan.isInterestingAlloca(*APC.AI));
3453 assert(APC.AI->isStaticAlloca());
3454
3455 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3456 Desc.LifetimeSize = Desc.Size;
3457 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3458 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3459 if (LifetimeLoc->getFile() == FnLoc->getFile())
3460 if (unsigned Line = LifetimeLoc->getLine())
3461 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3462 }
3463 }
3464 }
3465
3466 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3467 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3468 uint64_t LocalStackSize = L.FrameSize;
3469 bool DoStackMalloc =
3470 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3471 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3472 bool DoDynamicAlloca = ClDynamicAllocaStack;
3473 // Don't do dynamic alloca or stack malloc if:
3474 // 1) There is inline asm: too often it makes assumptions on which registers
3475 // are available.
3476 // 2) There is a returns_twice call (typically setjmp), which is
3477 // optimization-hostile, and doesn't play well with introduced indirect
3478 // register-relative calculation of local variable addresses.
3479 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3480 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3481
3482 Value *StaticAlloca =
3483 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3484
3485 Value *FakeStack;
3486 Value *LocalStackBase;
3487 Value *LocalStackBaseAlloca;
3488 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3489
3490 if (DoStackMalloc) {
3491 LocalStackBaseAlloca =
3492 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3493 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3494 // void *FakeStack = __asan_option_detect_stack_use_after_return
3495 // ? __asan_stack_malloc_N(LocalStackSize)
3496 // : nullptr;
3497 // void *LocalStackBase = (FakeStack) ? FakeStack :
3498 // alloca(LocalStackSize);
3499 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3500 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3501 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3502 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3503 Constant::getNullValue(IRB.getInt32Ty()));
3504 Instruction *Term =
3505 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3506 IRBuilder<> IRBIf(Term);
3507 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3508 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3509 Value *FakeStackValue =
3510 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3511 ConstantInt::get(IntptrTy, LocalStackSize));
3512 IRB.SetInsertPoint(InsBefore);
3513 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3514 ConstantInt::get(IntptrTy, 0));
3515 } else {
3516 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3517 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3518 // void *LocalStackBase = (FakeStack) ? FakeStack :
3519 // alloca(LocalStackSize);
3520 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3521 FakeStack =
3522 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3523 ConstantInt::get(IntptrTy, LocalStackSize));
3524 }
3525 Value *NoFakeStack =
3526 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3527 Instruction *Term =
3528 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3529 IRBuilder<> IRBIf(Term);
3530 Value *AllocaValue =
3531 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3532
3533 IRB.SetInsertPoint(InsBefore);
3534 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3535 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3536 DIExprFlags |= DIExpression::DerefBefore;
3537 } else {
3538 // void *FakeStack = nullptr;
3539 // void *LocalStackBase = alloca(LocalStackSize);
3540 FakeStack = ConstantInt::get(IntptrTy, 0);
3541 LocalStackBase =
3542 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3543 LocalStackBaseAlloca = LocalStackBase;
3544 }
3545
3546 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3547 // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3548 // later passes and can result in dropped variable coverage in debug info.
3549 Value *LocalStackBaseAllocaPtr =
3550 isa<PtrToIntInst>(LocalStackBaseAlloca)
3551 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3552 : LocalStackBaseAlloca;
3553 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3554 "Variable descriptions relative to ASan stack base will be dropped");
3555
3556 // Replace Alloca instructions with base+offset.
3557 for (const auto &Desc : SVD) {
3558 AllocaInst *AI = Desc.AI;
3559 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3560 Desc.Offset);
3561 Value *NewAllocaPtr = IRB.CreateIntToPtr(
3562 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3563 AI->getType());
3564 AI->replaceAllUsesWith(NewAllocaPtr);
3565 }
3566
3567 // The left-most redzone has enough space for at least 4 pointers.
3568 // Write the Magic value to redzone[0].
3569 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3570 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3571 BasePlus0);
3572 // Write the frame description constant to redzone[1].
3573 Value *BasePlus1 = IRB.CreateIntToPtr(
3574 IRB.CreateAdd(LocalStackBase,
3575 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3576 IntptrPtrTy);
3577 GlobalVariable *StackDescriptionGlobal =
3578 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3579 /*AllowMerging*/ true, kAsanGenPrefix);
3580 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3581 IRB.CreateStore(Description, BasePlus1);
3582 // Write the PC to redzone[2].
3583 Value *BasePlus2 = IRB.CreateIntToPtr(
3584 IRB.CreateAdd(LocalStackBase,
3585 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3586 IntptrPtrTy);
3587 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3588
3589 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3590
3591 // Poison the stack red zones at the entry.
3592 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3593 // As the mask we must use the most-poisoned case: redzones plus after-scope
3594 // bytes. As the bytes we can use either the same or just the redzones.
3595 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3596
3597 if (!StaticAllocaPoisonCallVec.empty()) {
3598 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3599
3600 // Poison static allocas near lifetime intrinsics.
3601 for (const auto &APC : StaticAllocaPoisonCallVec) {
3602 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3603 assert(Desc.Offset % L.Granularity == 0);
3604 size_t Begin = Desc.Offset / L.Granularity;
3605 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3606
3607 IRBuilder<> IRB(APC.InsBefore);
3608 copyToShadow(ShadowAfterScope,
3609 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3610 IRB, ShadowBase);
3611 }
3612 }
3613
3614 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3615 SmallVector<uint8_t, 64> ShadowAfterReturn;
3616
3617 // (Un)poison the stack before all ret instructions.
3618 for (Instruction *Ret : RetVec) {
3619 IRBuilder<> IRBRet(Ret);
3620 // Mark the current frame as retired.
3621 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3622 BasePlus0);
3623 if (DoStackMalloc) {
3624 assert(StackMallocIdx >= 0);
3625 // if FakeStack != 0 // LocalStackBase == FakeStack
3626 // // In use-after-return mode, poison the whole stack frame.
3627 // if StackMallocIdx <= 4
3628 // // For small sizes inline the whole thing:
3629 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3630 // **SavedFlagPtr(FakeStack) = 0
3631 // else
3632 // __asan_stack_free_N(FakeStack, LocalStackSize)
3633 // else
3634 // <This is not a fake stack; unpoison the redzones>
3635 Value *Cmp =
3636 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3637 Instruction *ThenTerm, *ElseTerm;
3638 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3639
3640 IRBuilder<> IRBPoison(ThenTerm);
3641 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3642 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3643 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3644 kAsanStackUseAfterReturnMagic);
3645 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3646 ShadowBase);
3647 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3648 FakeStack,
3649 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3650 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3651 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3652 IRBPoison.CreateStore(
3653 Constant::getNullValue(IRBPoison.getInt8Ty()),
3654 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3655 } else {
3656 // For larger frames call __asan_stack_free_*.
3657 RTCI.createRuntimeCall(
3658 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3659 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3660 }
3661
3662 IRBuilder<> IRBElse(ElseTerm);
3663 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3664 } else {
3665 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3666 }
3667 }
3668
3669 // We are done. Remove the old unused alloca instructions.
3670 for (auto *AI : AllocaVec)
3671 AI->eraseFromParent();
3672}
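// Illustrative result (assuming shadow granularity 8 and the 32-byte minimal
// header): a frame with one 4-byte local is laid out as [0,32) left redzone
// holding the magic, description pointer and PC, [32,36) the variable, and
// [36,64) right redzone, giving shadow bytes f1 f1 f1 f1 04 f3 f3 f3.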
3673
3674void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3675 IRBuilder<> &IRB, bool DoPoison) {
3676 // For now just insert the call to ASan runtime.
3677 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3678 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3679 RTCI.createRuntimeCall(
3680 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3681 {AddrArg, SizeArg});
3682}
3683
3684// Handling llvm.lifetime intrinsics for a given %alloca:
3685// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3686// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3687// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3688// could be poisoned by previous llvm.lifetime.end instruction, as the
3689// variable may go in and out of scope several times, e.g. in loops).
3690// (3) if we poisoned at least one %alloca in a function,
3691// unpoison the whole stack frame at function exit.
3692void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3693 IRBuilder<> IRB(AI);
3694
3695 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3696 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3697
3698 Value *Zero = Constant::getNullValue(IntptrTy);
3699 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3700 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3701
3702 // We need to extend the alloca with additional memory to place the
3703 // redzones. The array size is the number of allocated elements, each of
3704 // ElementSize bytes, so the allocated size in bytes (OldSize) is
3705 // ArraySize * ElementSize.
3706 const unsigned ElementSize =
3707 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3708 Value *OldSize =
3709 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3710 ConstantInt::get(IntptrTy, ElementSize));
3711
3712 // PartialSize = OldSize % 32
3713 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3714
3715 // Misalign = kAllocaRzSize - PartialSize;
3716 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3717
3718 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3719 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3720 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3721
3722 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3723 // Alignment is added to locate left redzone, PartialPadding for possible
3724 // partial redzone and kAllocaRzSize for right redzone respectively.
3725 Value *AdditionalChunkSize = IRB.CreateAdd(
3726 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3727 PartialPadding);
3728
3729 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
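// Worked example (illustrative, assuming the upstream kAllocaRzSize of 32 and
// 32-byte alignment): OldSize = 100 gives PartialSize = 4, Misalign = 28,
// PartialPadding = 28, so NewSize = 100 + (32 + 32 + 28) = 192: a 32-byte left
// redzone, 100 user bytes, 28 bytes of partial padding, and a 32-byte right
// redzone.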
3730
3731 // Insert new alloca with new NewSize and Alignment params.
3732 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3733 NewAlloca->setAlignment(Alignment);
3734
3735 // NewAddress = Address + Alignment
3736 Value *NewAddress =
3737 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3738 ConstantInt::get(IntptrTy, Alignment.value()));
3739
3740 // Insert an __asan_alloca_poison call for the newly created alloca.
3741 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3742
3743 // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3744 // to unpoison the dynamic allocas on function exit.
3745 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3746
3747 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3748
3749 // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3750 AI->replaceAllUsesWith(NewAddressPtr);
3751
3752 // We are done. Erase old alloca from parent.
3753 AI->eraseFromParent();
3754}
3755
3756// isSafeAccess returns true if Addr is always inbounds with respect to its
3757// base object. For example, it is a field access or an array access with
3758// constant inbounds index.
3759bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3760 Value *Addr, TypeSize TypeStoreSize) const {
3761 if (TypeStoreSize.isScalable())
3762 // TODO: We can use vscale_range to convert a scalable value to an
3763 // upper bound on the access size.
3764 return false;
3765
3766 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3767 if (!SizeOffset.bothKnown())
3768 return false;
3769
3770 uint64_t Size = SizeOffset.Size.getZExtValue();
3771 int64_t Offset = SizeOffset.Offset.getSExtValue();
3772
3773 // Three checks are required to ensure safety:
3774 // . Offset >= 0 (since the offset is given from the base ptr)
3775 // . Size >= Offset (unsigned)
3776 // . Size - Offset >= NeededSize (unsigned)
3777 return Offset >= 0 && Size >= uint64_t(Offset) &&
3778 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3779}
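// For example, a 4-byte access at constant offset 12 into a known 16-byte
// object is safe (16 - 12 >= 4), while the same access at offset 14 is not
// and keeps its shadow check.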
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
static const uint64_t kEmscriptenShadowOffset
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
This defines the Use class.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
#define OP(OPC)
Definition: SandboxIR.h:483
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file contains some functions that are useful when dealing with strings.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1500
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
Definition: Instructions.h:61
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:137
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
Definition: Instructions.h:126
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:647
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
Definition: Attributes.h:606
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:438
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:414
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:202
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:290
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
Definition: InstrTypes.h:2008
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition: Comdat.h:38
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition: Comdat.h:40
@ Any
The linker may choose any COMDAT.
Definition: Comdat.h:36
@ NoDeduplicate
No deduplication is performed.
Definition: Comdat.h:39
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition: Comdat.h:37
ConstantArray - Constant Array Declarations.
Definition: Constants.h:424
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1292
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2269
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2215
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1240
static bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
Definition: Constants.cpp:1575
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1800
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1357
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:432
Debug location.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition: Function.h:823
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition: Function.cpp:379
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:868
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1963
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:358
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:544
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
Definition: Globals.cpp:137
void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
Definition: Metadata.cpp:1755
void setComdat(Comdat *C)
Definition: Globals.cpp:206
void setSection(StringRef S)
Change the section for this global.
Definition: Globals.cpp:267
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:248
void setUnnamedAddr(UnnamedAddr Val)
Definition: GlobalValue.h:231
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:567
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:271
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:254
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ CommonLinkage
Tentative definitions.
Definition: GlobalValue.h:62
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition: GlobalValue.h:53
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition: GlobalValue.h:61
DLLStorageClassTypes getDLLStorageClass() const
Definition: GlobalValue.h:275
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition: Globals.cpp:508
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1778
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:508
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2465
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1812
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2175
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2274
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1091
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2127
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:105
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1442
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1981
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:528
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2250
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1871
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:483
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2402
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1754
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2246
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1349
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition: IRBuilder.h:494
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition: IRBuilder.h:1795
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1480
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1808
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1332
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2122
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2554
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1502
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2201
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:561
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1831
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2417
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Definition: IRBuilder.h:656
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2137
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1366
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2671
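The IRBuilder entries above are the primitives this pass builds its instrumentation from. As a minimal illustrative sketch, not code from this file, here is how CreatePtrToInt, CreateLShr, CreateAdd, and CreateIntToPtr compose into the classic shadow-address computation Shadow = (Addr >> Scale) + Offset; the helper name and the Scale/Offset parameters are assumptions for illustration.

// Minimal sketch (assumed helper, assumed 64-bit target), not this pass's
// code: map an application address to its shadow address,
//   Shadow = (Addr >> Scale) + Offset.
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static Value *emitShadowAddress(IRBuilder<> &IRB, Value *Addr,
                                uint64_t Scale, uint64_t Offset) {
  Type *IntptrTy = IRB.getInt64Ty(); // assumes a 64-bit pointer width
  Value *AddrInt = IRB.CreatePtrToInt(Addr, IntptrTy, "addr_int");
  Value *Shifted = IRB.CreateLShr(AddrInt, IRB.getInt64(Scale));
  Value *ShadowInt = IRB.CreateAdd(Shifted, IRB.getInt64(Offset), "shadow_int");
  return IRB.CreateIntToPtr(ShadowInt, PointerType::getUnqual(IRB.getInt8Ty()),
                            "shadow_ptr");
}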
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR unit.
Definition: PassManager.h:563
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
Definition: InstVisitor.h:244
RetTy visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitReturnInst(ReturnInst &I)
Definition: InstVisitor.h:226
RetTy visitAllocaInst(AllocaInst &I)
Definition: InstVisitor.h:168
RetTy visitResumeInst(ResumeInst &I)
Definition: InstVisitor.h:238
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not belong to a module.
Definition: Instruction.cpp:66
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:363
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:824
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this', or nullptr if no such instruction exists.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
An instruction for reading from memory.
Definition: Instructions.h:174
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
Definition: MDBuilder.cpp:47
Metadata node.
Definition: Metadata.h:1067
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Evaluate the size and offset of an object pointed to by a Value* statically.
SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1189
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
Definition: DerivedTypes.h:662
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
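A short sketch of the conventional use of these factories in a new-pass-manager run method; the Changed flag is an assumed input, not part of this file.

// Conventional new-PM idiom (sketch): nothing is preserved if the pass
// changed the IR, everything is preserved otherwise.
#include "llvm/IR/Analysis.h"

using namespace llvm;

static PreservedAnalyses reportPreservation(bool Changed) {
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}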
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void reserve(size_type N)
Definition: SmallVector.h:676
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:373
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vector, but is general enough to go beyond that when required.
Definition: TinyPtrVector.h:29
EltTy front() const
bool empty() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:771
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition: Triple.h:852
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition: Triple.h:553
bool isOSNetBSD() const
Definition: Triple.h:576
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:769
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition: Triple.h:943
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition: Triple.h:932
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:390
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition: Triple.h:938
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
@ DXContainer
Definition: Triple.h:301
@ UnknownObjectFormat
Definition: Triple.h:298
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition: Triple.h:857
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:678
bool isAMDGPU() const
Definition: Triple.h:847
bool isMacOSX() const
Is this a Mac OS X triple.
Definition: Triple.h:522
bool isOSFreeBSD() const
Definition: Triple.h:584
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:698
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition: Triple.h:541
bool isiOS() const
Is this an iOS triple.
Definition: Triple.h:531
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition: Triple.h:766
bool isOSFuchsia() const
Definition: Triple.h:588
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
static IntegerType * getInt32Ty(LLVMContext &C)
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:495
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1513
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition: MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
Definition: X86BaseInfo.h:732
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns the unique alloca that the value comes from, or nullptr.
@ Done
Definition: Threading.h:61
Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan's stack use-after-return detection.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, const char *NamePrefix="")
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
Op::Description Desc
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
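As a sketch under assumed names ("mysan.module_ctor" and "__mysan_init" are placeholders, not ASan's real symbols), the typical wiring of this helper together with appendToGlobalCtors looks like:

// Illustrative only: create a module constructor that calls the runtime
// init function, then register it in llvm.global_ctors.
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

static void installSanitizerCtor(Module &M) {
  auto CtorAndInit = createSanitizerCtorAndInitFunctions(
      M, "mysan.module_ctor", "__mysan_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{});
  appendToGlobalCtors(M, CtorAndInit.first, /*Priority=*/1);
}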
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, Instruction *InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
@ Dynamic
Denotes mode unknown at compile time.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the block are moved to a new block.
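To make this concrete, here is a hedged sketch (the helper name is assumed, and llvm.trap stands in for a real runtime-report callee) of the pattern an instrumentation pass uses for its cold error paths, marking the branch as unlikely via createUnlikelyBranchWeights:

// Illustrative sketch: when BadBit is set, branch to a terminating
// "then" block that traps; the fall-through path continues unchanged.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

static void emitGuardedTrap(Instruction *InsertBefore, Value *BadBit) {
  MDBuilder MDB(InsertBefore->getContext());
  // Unreachable=true makes the new "then" block end in 'unreachable'.
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      BadBit, InsertBefore->getIterator(), /*Unreachable=*/true,
      MDB.createUnlikelyBranchWeights());
  IRBuilder<> IRB(ThenTerm);
  Module *M = InsertBefore->getModule();
  IRB.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::trap));
}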
AsanCtorKind
Types of ASan module constructors supported.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition: Local.cpp:4103
void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
Definition: ModuleUtils.cpp:78
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
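A small usage sketch for this query: the triple is just an example and the declaration is assumed to live in the pass's public header, llvm/Transforms/Instrumentation/AddressSanitizer.h. ShadowBase, MappingScale, and OrShadowOffset are outputs describing the shadow mapping for that target.

// Illustrative only: query the shadow-mapping parameters for a target.
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"

using namespace llvm;

static void queryShadowMapping() {
  Triple T("x86_64-unknown-linux-gnu"); // example triple
  uint64_t ShadowBase;
  int MappingScale;
  bool OrShadowOffset;
  getAddressSanitizerParams(T, /*LongSize=*/64, /*IsKasan=*/false,
                            &ShadowBase, &MappingScale, &OrShadowOffset);
  (void)ShadowBase; (void)MappingScale; (void)OrShadowOffset;
}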
std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition: Demangle.cpp:20
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:2132
#define N
ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
Various options to control the behavior of getObjectSize.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.
bool bothKnown() const