LLVM 23.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/MD5.h"
66#include <optional>
67#include <random>
68
69using namespace llvm;
70
71#define DEBUG_TYPE "hwasan"
72
73const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
74const char kHwasanNoteName[] = "hwasan.note";
75const char kHwasanInitName[] = "__hwasan_init";
76const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
77
79 "__hwasan_shadow_memory_dynamic_address";
80
81// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
82static const size_t kNumberOfAccessSizes = 5;
83
84static const size_t kDefaultShadowScale = 4;
85
86static const unsigned kShadowBaseAlignment = 32;
87
88namespace {
// How the shadow-memory base address is obtained at runtime; selected by
// -hwasan-mapping-offset[-dynamic] and consumed by ShadowMapping below.
89enum class OffsetKind {
 // Compile-time constant offset: shadow = (mem >> scale) + Offset.
90 kFixed = 0,
 // Loaded from the global __hwasan_shadow_memory_dynamic_address.
91 kGlobal,
 // Address of the ifunc-resolved __hwasan_shadow array.
92 kIfunc,
 // Derived from the __hwasan_tls thread-local slot.
93 kTls,
94};
95}
96
98 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
99 cl::desc("Prefix for memory access callbacks"),
100 cl::Hidden, cl::init("__hwasan_"));
101
103 "hwasan-kernel-mem-intrinsic-prefix",
104 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
105 cl::init(false));
106
108 "hwasan-instrument-with-calls",
109 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
110 cl::init(false));
111
112static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
113 cl::desc("instrument read instructions"),
114 cl::Hidden, cl::init(true));
115
116static cl::opt<bool>
117 ClInstrumentWrites("hwasan-instrument-writes",
118 cl::desc("instrument write instructions"), cl::Hidden,
119 cl::init(true));
120
122 "hwasan-instrument-atomics",
123 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
124 cl::init(true));
125
126static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
127 cl::desc("instrument byval arguments"),
128 cl::Hidden, cl::init(true));
129
130static cl::opt<bool>
131 ClRecover("hwasan-recover",
132 cl::desc("Enable recovery mode (continue-after-error)."),
133 cl::Hidden, cl::init(false));
134
135static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
136 cl::desc("instrument stack (allocas)"),
137 cl::Hidden, cl::init(true));
138
139static cl::opt<bool>
140 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
141 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
143
145 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
147 cl::desc("How many lifetime ends to handle for a single alloca."),
149
150static cl::opt<bool>
151 ClUseAfterScope("hwasan-use-after-scope",
152 cl::desc("detect use after scope within function"),
153 cl::Hidden, cl::init(true));
154
156 "hwasan-strict-use-after-scope",
157 cl::desc("for complicated lifetimes, tag both on end and return"),
158 cl::Hidden, cl::init(true));
159
161 "hwasan-generate-tags-with-calls",
162 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
163 cl::init(false));
164
165static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
166 cl::Hidden, cl::init(false));
167
169 "hwasan-all-globals",
170 cl::desc(
171 "Instrument globals, even those within user-defined sections. Warning: "
172 "This may break existing code which walks globals via linker-generated "
173 "symbols, expects certain globals to be contiguous with each other, or "
174 "makes other assumptions which are invalidated by HWASan "
175 "instrumentation."),
176 cl::Hidden, cl::init(false));
177
179 "hwasan-match-all-tag",
180 cl::desc("don't report bad accesses via pointers with this tag"),
181 cl::Hidden, cl::init(-1));
182
183static cl::opt<bool>
184 ClEnableKhwasan("hwasan-kernel",
185 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
186 cl::Hidden, cl::init(false));
187
188// These flags allow to change the shadow mapping and control how shadow memory
189// is accessed. The shadow mapping looks like:
190// Shadow = (Mem >> scale) + offset
191
193 ClMappingOffset("hwasan-mapping-offset",
194 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
195 cl::Hidden);
196
198 "hwasan-mapping-offset-dynamic",
199 cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
200 cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
201 clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
202 clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));
203
204static cl::opt<bool>
205 ClFrameRecords("hwasan-with-frame-record",
206 cl::desc("Use ring buffer for stack allocations"),
207 cl::Hidden);
208
209static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
210 cl::desc("Hot percentile cutoff."));
211
212static cl::opt<float>
213 ClRandomKeepRate("hwasan-random-rate",
214 cl::desc("Probability value in the range [0.0, 1.0] "
215 "to keep instrumentation of a function. "
216 "Note: instrumentation can be skipped randomly "
217 "OR because of the hot percentile cutoff, if "
218 "both are supplied."));
219
221 "hwasan-static-linking",
222 cl::desc("Don't use .note.hwasan.globals section to instrument globals "
223 "from loadable libraries. "
224 "Note: in static binaries, the global variables section can be "
225 "accessed directly via linker-provided "
226 "__start_hwasan_globals and __stop_hwasan_globals symbols"),
227 cl::Hidden, cl::init(false));
228
229STATISTIC(NumTotalFuncs, "Number of total funcs");
230STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
231STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
232
233// Mode for selecting how to insert frame record info into the stack ring
234// buffer.
236 // Do not record frame record info.
238
239 // Insert instructions into the prologue for storing into the stack ring
240 // buffer directly.
242
243 // Add a call to __hwasan_add_frame_record in the runtime.
245};
246
248 "hwasan-record-stack-history",
249 cl::desc("Record stack frames with tagged allocations in a thread-local "
250 "ring buffer"),
251 cl::values(clEnumVal(none, "Do not record stack ring history"),
252 clEnumVal(instr, "Insert instructions into the prologue for "
253 "storing into the stack ring buffer directly"),
254 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
255 "storing into the stack ring buffer")),
257
258static cl::opt<bool>
259 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
260 cl::desc("instrument memory intrinsics"),
261 cl::Hidden, cl::init(true));
262
263static cl::opt<bool>
264 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
265 cl::desc("instrument landing pads"), cl::Hidden,
266 cl::init(false));
267
269 "hwasan-use-short-granules",
270 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
271 cl::init(false));
272
274 "hwasan-instrument-personality-functions",
275 cl::desc("instrument personality functions"), cl::Hidden);
276
277static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
278 cl::desc("inline all checks"),
279 cl::Hidden, cl::init(false));
280
281static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
282 cl::desc("inline all checks"),
283 cl::Hidden, cl::init(false));
284
285// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
286static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
287 cl::desc("Use page aliasing in HWASan"),
288 cl::Hidden, cl::init(false));
289
290namespace {
291
292template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
293 return Opt.getNumOccurrences() ? Opt : Other;
294}
295
296bool shouldUsePageAliases(const Triple &TargetTriple) {
297 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
298}
299
300bool shouldInstrumentStack(const Triple &TargetTriple) {
301 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
302}
303
304bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
305 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
306}
307
308bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
309 return optOr(ClUseStackSafety, !DisableOptimization);
310}
311
312bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
313 bool DisableOptimization) {
314 return shouldInstrumentStack(TargetTriple) &&
315 mightUseStackSafetyAnalysis(DisableOptimization);
316}
317
318bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
319 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
320}
321
322/// An instrumentation pass implementing detection of addressability bugs
323/// using tagged pointers.
324class HWAddressSanitizer {
325public:
 // Resolves the effective Recover/CompileKernel settings from the Cl*
 // flag overrides, seeds the RNG only when random instrumentation-skipping
 // (-hwasan-random-rate) was requested, then performs module-level setup.
326 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
327 const StackSafetyGlobalInfo *SSI)
328 : M(M), SSI(SSI) {
329 this->Recover = optOr(ClRecover, Recover);
330 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
331 this->Rng = ClRandomKeepRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
332 : nullptr;
333
334 initializeModule();
335 }
336
 /// Per-function entry point: instruments one function's memory accesses,
 /// stack allocas and landing pads as configured.
337 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
338
339private:
 // IR values produced by insertShadowTagCheck so callers can reuse the
 // computed pointer/shadow tags and the tag-mismatch branch terminator.
340 struct ShadowTagCheckInfo {
341 Instruction *TagMismatchTerm = nullptr;
342 Value *PtrLong = nullptr;
343 Value *AddrLong = nullptr;
344 Value *PtrTag = nullptr;
345 Value *MemTag = nullptr;
346 };
347
348 bool selectiveInstrumentationShouldSkip(Function &F,
350 void initializeModule();
351 void createHwasanCtorComdat();
352 void createHwasanNote();
353
354 void initializeCallbacks(Module &M);
355
 // Wraps Val in an empty inline-asm "=r,0" no-op to keep the optimizer
 // from rematerializing it at every use.
356 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
357
 // Shadow-base retrieval for the non-TLS mapping kinds (fixed/ifunc/global).
358 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
359 Value *getShadowNonTls(IRBuilder<> &IRB);
360
361 void untagPointerOperand(Instruction *I, Value *Addr);
362 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
363
 // Packs IsWrite/size/recover/match-all state into the immediate encoding
 // consumed by the outlined-check runtime.
364 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
365 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
366 DomTreeUpdater &DTU, LoopInfo *LI);
367 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
368 unsigned AccessSizeIndex,
369 Instruction *InsertBefore,
370 DomTreeUpdater &DTU, LoopInfo *LI);
371 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
372 unsigned AccessSizeIndex,
373 Instruction *InsertBefore, DomTreeUpdater &DTU,
374 LoopInfo *LI);
375 bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
376 void instrumentMemIntrinsic(MemIntrinsic *MI);
377 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
378 LoopInfo *LI, const DataLayout &DL);
379 bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
380 bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
381 Value *Ptr);
382
384 OptimizationRemarkEmitter &ORE, Instruction *I,
385 const TargetLibraryInfo &TLI,
386 SmallVectorImpl<InterestingMemoryOperand> &Interesting);
387
 // Stack/alloca tagging helpers.
388 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
389 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
390 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
391 void instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
392 const DominatorTree &DT, const PostDominatorTree &PDT,
393 const LoopInfo &LI);
394 void instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
395 Value *getNextTagWithCall(IRBuilder<> &IRB);
396 Value *getStackBaseTag(IRBuilder<> &IRB);
397 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
398 Value *getUARTag(IRBuilder<> &IRB);
399
400 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
401 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
402 unsigned retagMask(unsigned AllocaNo);
403
404 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
405
406 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
407 void instrumentGlobals();
408
409 Value *getCachedFP(IRBuilder<> &IRB);
410 Value *getFrameRecordInfo(IRBuilder<> &IRB);
411
412 void instrumentPersonalityFunctions();
413
 // --- Pass-wide state, set up in initializeModule() ---
414 LLVMContext *C;
415 Module &M;
 // May be null when stack-safety analysis is disabled or not applicable.
416 const StackSafetyGlobalInfo *SSI;
417 Triple TargetTriple;
 // Non-null only when -hwasan-random-rate is given (see constructor).
418 std::unique_ptr<RandomNumberGenerator> Rng;
419
420 /// This struct defines the shadow mapping using the rule:
421 /// If `kFixed`, then
422 /// shadow = (mem >> Scale) + Offset.
423 /// If `kGlobal`, then
424 /// extern char* __hwasan_shadow_memory_dynamic_address;
425 /// shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
426 /// If `kIfunc`, then
427 /// extern char __hwasan_shadow[];
428 /// shadow = (mem >> Scale) + &__hwasan_shadow
429 /// If `kTls`, then
430 /// extern char *__hwasan_tls;
431 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
432 ///
433 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
434 /// ring buffer for storing stack allocations on targets that support it.
435 class ShadowMapping {
436 OffsetKind Kind;
437 uint64_t Offset;
438 uint8_t Scale;
439 bool WithFrameRecord;
440
441 void SetFixed(uint64_t O) {
442 Kind = OffsetKind::kFixed;
443 Offset = O;
444 }
445
446 public:
447 void init(Triple &TargetTriple, bool InstrumentWithCalls,
448 bool CompileKernel);
449 Align getObjectAlignment() const { return Align(1ULL << Scale); }
450 bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
451 bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
452 bool isInTls() const { return Kind == OffsetKind::kTls; }
453 bool isFixed() const { return Kind == OffsetKind::kFixed; }
454 uint8_t scale() const { return Scale; };
 // Offset is only meaningful for the fixed mapping kind.
455 uint64_t offset() const {
456 assert(isFixed());
457 return Offset;
458 };
459 bool withFrameRecord() const { return WithFrameRecord; };
460 };
461
462 ShadowMapping Mapping;
463
 // Frequently used IR types, cached once per module.
464 Type *VoidTy = Type::getVoidTy(M.getContext());
465 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
466 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
467 Type *Int8Ty = Type::getInt8Ty(M.getContext());
468 Type *Int32Ty = Type::getInt32Ty(M.getContext());
469 Type *Int64Ty = Type::getInt64Ty(M.getContext());
470
 // Effective configuration, resolved from flags + target in
 // initializeModule().
471 bool CompileKernel;
472 bool Recover;
473 bool OutlinedChecks;
474 bool InlineFastPath;
475 bool UseShortGranules;
476 bool InstrumentLandingPads;
477 bool InstrumentWithCalls;
478 bool InstrumentStack;
479 bool InstrumentGlobals;
480 bool DetectUseAfterScope;
481 bool UsePageAliases;
482 bool UseMatchAllCallback;
483
 // When set, accesses through pointers carrying this tag are never reported.
484 std::optional<uint8_t> MatchAllTag;
485
486 unsigned PointerTagShift;
487 uint64_t TagMaskByte;
488
489 Function *HwasanCtorFunction;
490
 // Runtime callbacks, resolved in initializeCallbacks(); indexed by
 // [IsWrite][AccessSizeIndex] where applicable.
491 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
492 FunctionCallee HwasanMemoryAccessCallbackSized[2];
493
494 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
495 FunctionCallee HwasanHandleVfork;
496
497 FunctionCallee HwasanTagMemoryFunc;
498 FunctionCallee HwasanGenerateTagFunc;
499 FunctionCallee HwasanRecordFrameRecordFunc;
500
501 Constant *ShadowGlobal;
502
 // Per-function cached values; reset between functions.
503 Value *ShadowBase = nullptr;
504 Value *StackBaseTag = nullptr;
505 Value *CachedFP = nullptr;
506 GlobalValue *ThreadPtrGlobal = nullptr;
507};
508
509} // end anonymous namespace
510
513 // Return early if nosanitize_hwaddress module flag is present for the module.
514 if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
515 return PreservedAnalyses::all();
516 const StackSafetyGlobalInfo *SSI = nullptr;
517 const Triple &TargetTriple = M.getTargetTriple();
518 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
519 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
520
521 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
522 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
523 for (Function &F : M)
524 HWASan.sanitizeFunction(F, FAM);
525
527 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
528 // are incrementally updated throughout this pass whenever
529 // SplitBlockAndInsertIfThen is called.
533 // GlobalsAA is considered stateless and does not get invalidated unless
534 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
535 // make changes that require GlobalsAA to be invalidated.
536 PA.abandon<GlobalsAA>();
537 return PA;
538}
540 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
542 OS, MapClassName2PassName);
543 OS << '<';
544 if (Options.CompileKernel)
545 OS << "kernel;";
546 if (Options.Recover)
547 OS << "recover";
548 OS << '>';
549}
550
551void HWAddressSanitizer::createHwasanNote() {
552 // Create a note that contains pointers to the list of global
553 // descriptors. Adding a note to the output file will cause the linker to
554 // create a PT_NOTE program header pointing to the note that we can use to
555 // find the descriptor list starting from the program headers. A function
556 // provided by the runtime initializes the shadow memory for the globals by
557 // accessing the descriptor list via the note. The dynamic loader needs to
558 // call this function whenever a library is loaded.
559 //
560 // The reason why we use a note for this instead of a more conventional
561 // approach of having a global constructor pass a descriptor list pointer to
562 // the runtime is because of an order of initialization problem. With
563 // constructors we can encounter the following problematic scenario:
564 //
565 // 1) library A depends on library B and also interposes one of B's symbols
566 // 2) B's constructors are called before A's (as required for correctness)
567 // 3) during construction, B accesses one of its "own" globals (actually
568 // interposed by A) and triggers a HWASAN failure due to the initialization
569 // for A not having happened yet
570 //
571 // Even without interposition it is possible to run into similar situations in
572 // cases where two libraries mutually depend on each other.
573 //
574 // We only need one note per binary, so put everything for the note in a
575 // comdat. This needs to be a comdat with an .init_array section to prevent
576 // newer versions of lld from discarding the note.
577 //
578 // Create the note even if we aren't instrumenting globals. This ensures that
579 // binaries linked from object files with both instrumented and
580 // non-instrumented globals will end up with a note, even if a comdat from an
581 // object file with non-instrumented globals is selected. The note is harmless
582 // if the runtime doesn't support it, since it will just be ignored.
583 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
584
585 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
586 auto *Start =
587 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
588 nullptr, "__start_hwasan_globals");
589 Start->setVisibility(GlobalValue::HiddenVisibility);
590 auto *Stop =
591 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
592 nullptr, "__stop_hwasan_globals");
593 Stop->setVisibility(GlobalValue::HiddenVisibility);
594
595 // Null-terminated so actually 8 bytes, which are required in order to align
596 // the note properly.
597 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
598
599 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
601 auto *Note =
602 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
604 Note->setSection(".note.hwasan.globals");
605 Note->setComdat(NoteComdat);
606 Note->setAlignment(Align(4));
607
608 // The pointers in the note need to be relative so that the note ends up being
609 // placed in rodata, which is the standard location for notes.
610 auto CreateRelPtr = [&](Constant *Ptr) {
614 Int32Ty);
615 };
616 Note->setInitializer(ConstantStruct::getAnon(
617 {ConstantInt::get(Int32Ty, 8), // n_namesz
618 ConstantInt::get(Int32Ty, 8), // n_descsz
619 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
620 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
622
623 // Create a zero-length global in hwasan_globals so that the linker will
624 // always create start and stop symbols.
625 auto *Dummy = new GlobalVariable(
626 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
627 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
628 Dummy->setSection("hwasan_globals");
629 Dummy->setComdat(NoteComdat);
630 Dummy->setMetadata(LLVMContext::MD_associated,
632 appendToCompilerUsed(M, Dummy);
633}
634
635void HWAddressSanitizer::createHwasanCtorComdat() {
636 std::tie(HwasanCtorFunction, std::ignore) =
639 /*InitArgTypes=*/{},
640 /*InitArgs=*/{},
641 // This callback is invoked when the functions are created the first
642 // time. Hook them into the global ctors list in that case:
643 [&](Function *Ctor, FunctionCallee) {
644 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
645 Ctor->setComdat(CtorComdat);
646 appendToGlobalCtors(M, Ctor, 0, Ctor);
647 });
648
649 // Do not create .note.hwasan.globals for static binaries, as it is only
650 // needed for instrumenting globals from dynamic libraries. In static
651 // binaries, the global variables section can be accessed directly via the
652 // __start_hwasan_globals and __stop_hwasan_globals symbols inserted by the
653 // linker.
654 if (!ClStaticLinking)
655 createHwasanNote();
656}
657
658/// Module-level initialization.
659///
660/// inserts a call to __hwasan_init to the module's constructor list.
661void HWAddressSanitizer::initializeModule() {
662 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
663 TargetTriple = M.getTargetTriple();
664
665 // HWASan may do short granule checks on function arguments read from the
666 // argument memory (last byte of the granule), which invalidates writeonly.
667 for (Function &F : M.functions())
668 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/true);
669
670 // x86_64 currently has two modes:
671 // - Intel LAM (default)
672 // - pointer aliasing (heap only)
673 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
674 UsePageAliases = shouldUsePageAliases(TargetTriple);
675 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
676 InstrumentStack = shouldInstrumentStack(TargetTriple);
677 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
678 PointerTagShift = IsX86_64 ? 57 : 56;
679 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
680
681 Mapping.init(TargetTriple, InstrumentWithCalls, CompileKernel);
682
683 C = &(M.getContext());
684 IRBuilder<> IRB(*C);
685
686 HwasanCtorFunction = nullptr;
687
688 // Older versions of Android do not have the required runtime support for
689 // short granules, global or personality function instrumentation. On other
690 // platforms we currently require using the latest version of the runtime.
691 bool NewRuntime =
692 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
693
694 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
695 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
696 TargetTriple.isOSBinFormatELF() &&
697 !optOr(ClInlineAllChecks, Recover);
698
699 // These platforms may prefer less inlining to reduce binary size.
700 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
701 TargetTriple.isOSFuchsia()));
702
703 if (ClMatchAllTag.getNumOccurrences()) {
704 if (ClMatchAllTag != -1) {
705 MatchAllTag = ClMatchAllTag & 0xFF;
706 }
707 } else if (CompileKernel) {
708 MatchAllTag = 0xFF;
709 }
710 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
711
712 // If we don't have personality function support, fall back to landing pads.
713 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
714
715 InstrumentGlobals =
716 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
717
718 if (!CompileKernel) {
719 if (InstrumentGlobals)
720 instrumentGlobals();
721
722 createHwasanCtorComdat();
723
724 bool InstrumentPersonalityFunctions =
725 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
726 if (InstrumentPersonalityFunctions)
727 instrumentPersonalityFunctions();
728 }
729
730 if (!TargetTriple.isAndroid()) {
731 ThreadPtrGlobal = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
732 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
734 "__hwasan_tls", nullptr,
737 return GV;
738 });
739 }
740}
741
742void HWAddressSanitizer::initializeCallbacks(Module &M) {
743 IRBuilder<> IRB(*C);
744 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
745 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
746 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
747 *HwasanMemsetFnTy;
748 if (UseMatchAllCallback) {
749 HwasanMemoryAccessCallbackSizedFnTy =
750 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
751 HwasanMemoryAccessCallbackFnTy =
752 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
753 HwasanMemTransferFnTy =
754 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
755 HwasanMemsetFnTy =
756 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
757 } else {
758 HwasanMemoryAccessCallbackSizedFnTy =
759 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
760 HwasanMemoryAccessCallbackFnTy =
761 FunctionType::get(VoidTy, {IntptrTy}, false);
762 HwasanMemTransferFnTy =
763 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
764 HwasanMemsetFnTy =
765 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
766 }
767
768 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
769 const std::string TypeStr = AccessIsWrite ? "store" : "load";
770 const std::string EndingStr = Recover ? "_noabort" : "";
771
772 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
773 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
774 HwasanMemoryAccessCallbackSizedFnTy);
775
776 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
777 AccessSizeIndex++) {
778 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
779 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
780 itostr(1ULL << AccessSizeIndex) +
781 MatchAllStr + EndingStr,
782 HwasanMemoryAccessCallbackFnTy);
783 }
784 }
785
786 const std::string MemIntrinCallbackPrefix =
787 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
788 ? std::string("")
790
791 HwasanMemmove = M.getOrInsertFunction(
792 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
793 HwasanMemcpy = M.getOrInsertFunction(
794 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
795 HwasanMemset = M.getOrInsertFunction(
796 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
797
798 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
799 PtrTy, Int8Ty, IntptrTy);
800 HwasanGenerateTagFunc =
801 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
802
803 HwasanRecordFrameRecordFunc =
804 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
805
806 ShadowGlobal =
807 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
808
809 HwasanHandleVfork =
810 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
811}
812
813Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
814 // An empty inline asm with input reg == output reg.
815 // An opaque no-op cast, basically.
816 // This prevents code bloat as a result of rematerializing trivial definitions
817 // such as constants or global addresses at every load and store.
818 InlineAsm *Asm =
819 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
820 StringRef(""), StringRef("=r,0"),
821 /*hasSideEffects=*/false);
822 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
823}
824
825Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
 // The ifunc-resolved __hwasan_shadow array is the shadow base; wrap it in
 // an opaque no-op cast so it is materialized once rather than at each use.
826 return getOpaqueNoopCast(IRB, ShadowGlobal);
827}
828
 // Computes the shadow base for the non-TLS mapping kinds: a fixed constant
 // offset, the ifunc-provided array, or a load from the dynamic-address
 // global (see the ShadowMapping comment for the full rules).
829Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
830 if (Mapping.isFixed()) {
 // Fixed mapping: the constant offset itself is the base; the opaque cast
 // keeps it from being rematerialized at every access.
831 return getOpaqueNoopCast(
833 ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
834 }
835
836 if (Mapping.isInIfunc())
837 return getDynamicShadowIfunc(IRB);
838
 // Otherwise (kGlobal): load the base published by the runtime.
 // NOTE(review): the initializer expression was elided in this extraction —
 // presumably M.getOrInsertGlobal(kHwasanShadowMemoryDynamicAddress, ...);
 // confirm against upstream.
839 Value *GlobalDynamicAddress =
842 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
843}
844
 // Returns true when the access through Ptr should not be instrumented:
 // non-zero address spaces, swifterror slots, provably-safe stack accesses,
 // and (when global instrumentation is off) global accesses.
845bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
846 Value *Ptr) {
847 // Do not instrument accesses from different address spaces; we cannot deal
848 // with them.
849 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
850 if (PtrTy->getPointerAddressSpace() != 0)
851 return true;
852
853 // Ignore swifterror addresses.
854 // swifterror memory addresses are mem2reg promoted by instruction
855 // selection. As such they cannot have regular uses like an instrumentation
856 // function and it makes no sense to track them as memory.
857 if (Ptr->isSwiftError())
858 return true;
859
 // Stack accesses: skip entirely when stack instrumentation is off, or when
 // the stack-safety analysis has already proven this access safe.
860 if (findAllocaForValue(Ptr)) {
861 if (!InstrumentStack)
862 return true;
863 if (SSI && SSI->stackAccessIsSafe(*Inst))
864 return true;
865 }
866
 // NOTE(review): the guard opening this block was elided in this extraction —
 // presumably a check that Ptr's underlying object is a GlobalVariable;
 // confirm against upstream.
868 if (!InstrumentGlobals)
869 return true;
870 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
871 }
872
873 return false;
874}
875
876bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
877 Instruction *Inst, Value *Ptr) {
878 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
879 if (Ignored) {
880 ORE.emit(
881 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
882 } else {
883 ORE.emit([&]() {
884 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
885 });
886 }
887 return Ignored;
888}
889
 // Collects the memory operands of I worth instrumenting into Interesting,
 // honoring the per-kind Cl* flags and the ignoreAccess filter.
890void HWAddressSanitizer::getInterestingMemoryOperands(
892 const TargetLibraryInfo &TLI,
894 // Skip memory accesses inserted by another instrumentation.
895 if (I->hasMetadata(LLVMContext::MD_nosanitize))
896 return;
897
898 // Do not instrument the load fetching the dynamic shadow address.
899 if (ShadowBase == I)
900 return;
901
902 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
903 if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
904 return;
905 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
906 LI->getType(), LI->getAlign());
907 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
908 if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
909 return;
910 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
911 SI->getValueOperand()->getType(), SI->getAlign());
912 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
 // Atomics are recorded as writes with unknown alignment.
913 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
914 return;
915 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
916 RMW->getValOperand()->getType(), std::nullopt);
917 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
918 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
919 return;
920 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
921 XCHG->getCompareOperand()->getType(),
922 std::nullopt);
923 } else if (auto *CI = dyn_cast<CallInst>(I)) {
 // byval arguments are implicit reads of the pointed-to memory at the
 // call site; treat each as a 1-aligned read of the byval type.
924 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
925 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
926 ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
927 continue;
928 Type *Ty = CI->getParamByValType(ArgNo);
929 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
930 }
932 }
933}
934
// Returns the operand index of the pointer for the four access-bearing
// instruction kinds; fatal error for anything else.
// NOTE(review): the listing dropped original lines 938, 940, 942 — the
// `if (... dyn_cast<...>(I))` guards for the Store/RMW/CmpXchg returns.
936 if (LoadInst *LI = dyn_cast<LoadInst>(I))
937 return LI->getPointerOperandIndex();
939 return SI->getPointerOperandIndex();
941 return RMW->getPointerOperandIndex();
943 return XCHG->getPointerOperandIndex();
944 report_fatal_error("Unexpected instruction");
// Unreachable after report_fatal_error; keeps the compiler's return check happy.
945 return -1;
946}
947
// Maps an access size in bits to the callback index (log2 of the byte size):
// 8 bits -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4.
// NOTE(review): the listing dropped original lines 948 (the function
// signature) and 950 (presumably an assert on the index range).
949 size_t Res = llvm::countr_zero(TypeSize / 8);
951 return Res;
952}
953
954void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
955 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
956 TargetTriple.isRISCV64())
957 return;
958
959 IRBuilder<> IRB(I);
960 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
961 Value *UntaggedPtr =
962 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
963 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
964}
965
966Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
967 // Mem >> Scale
968 Value *Shadow = IRB.CreateLShr(Mem, Mapping.scale());
969 if (Mapping.isFixed() && Mapping.offset() == 0)
970 return IRB.CreateIntToPtr(Shadow, PtrTy);
971 // (Mem >> Scale) + Offset
972 return IRB.CreatePtrAdd(ShadowBase, Shadow);
973}
974
975int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
976 unsigned AccessSizeIndex) {
977 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
978 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
979 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
980 (Recover << HWASanAccessInfo::RecoverShift) |
981 (IsWrite << HWASanAccessInfo::IsWriteShift) |
982 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
983}
984
// Emits the inline fast-path tag check before `InsertBefore`: extracts the
// pointer tag, loads the shadow memory tag, compares them, and splits off an
// unlikely "mismatch" block. Returns the intermediate values so callers can
// emit slow-path checks in the mismatch block.
985 HWAddressSanitizer::ShadowTagCheckInfo
986 HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
987 DomTreeUpdater &DTU, LoopInfo *LI) {
988 ShadowTagCheckInfo R;
989
990 IRBuilder<> IRB(InsertBefore);
991
// Pointer tag lives in the top byte (bits >= PointerTagShift).
992 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
993 R.PtrTag =
994 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
995 R.AddrLong = untagPointer(IRB, R.PtrLong);
996 Value *Shadow = memToShadow(R.AddrLong, IRB);
997 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
998 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
999
// A pointer carrying the match-all tag is always accepted, so the mismatch
// condition is suppressed for it.
1000 if (MatchAllTag.has_value()) {
1001 Value *TagNotIgnored = IRB.CreateICmpNE(
1002 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
1003 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
1004 }
1005
// Mismatch path is weighted unlikely; DTU/LI keep analyses up to date.
1006 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
1007 TagMismatch, InsertBefore, false,
1008 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
1009
1010 return R;
1011}
1012
// Emits an outlined (intrinsic-based) memory-access check; the actual check
// code is generated late by the backend from the hwasan_check_memaccess*
// intrinsics. Optionally prepends the inline fast path so the outlined check
// only runs on tag mismatch.
1013 void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
1014 unsigned AccessSizeIndex,
1015 Instruction *InsertBefore,
1016 DomTreeUpdater &DTU,
1017 LoopInfo *LI) {
1018 assert(!UsePageAliases);
1019 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1020
1021 if (InlineFastPath)
1022 InsertBefore =
1023 insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
1024
1025 IRBuilder<> IRB(InsertBefore);
1026 bool UseFixedShadowIntrinsic = false;
1027 // The memaccess fixed shadow intrinsic is only supported on AArch64,
1028 // which allows a 16-bit immediate to be left-shifted by 32.
1029 // Since kShadowBaseAlignment == 32, and Linux by default will not
1030 // mmap above 48-bits, practically any valid shadow offset is
1031 // representable.
1032 // In particular, an offset of 4TB (1024 << 32) is representable, and
1033 // ought to be good enough for anybody.
1034 if (TargetTriple.isAArch64() && Mapping.isFixed()) {
// Only offsets whose low 32 bits are zero fit the fixed-shadow encoding.
1035 uint16_t OffsetShifted = Mapping.offset() >> 32;
1036 UseFixedShadowIntrinsic =
1037 static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
1038 }
1039
1040 if (UseFixedShadowIntrinsic) {
1041 IRB.CreateIntrinsic(
1042 UseShortGranules
1043 ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
1044 : Intrinsic::hwasan_check_memaccess_fixedshadow,
1045 {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
1046 ConstantInt::get(Int64Ty, Mapping.offset())});
1047 } else {
// Dynamic-shadow variant takes the shadow base as an explicit operand.
1048 IRB.CreateIntrinsic(
1049 UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
1050 : Intrinsic::hwasan_check_memaccess,
1051 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
1052 }
1053}
1054
// Fully inline memory-access check: fast tag compare, then the short-granule
// slow path, then a trapping inline-asm "check failed" block that encodes the
// access info in the instruction stream for the signal handler.
// NOTE(review): the listing dropped original lines 1078, 1087 (branch-weight
// arguments) and 1095, 1107, 1115 (`Asm = InlineAsm::get(` heads) — restore
// from the upstream file before compiling.
1055 void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
1056 unsigned AccessSizeIndex,
1057 Instruction *InsertBefore,
1058 DomTreeUpdater &DTU,
1059 LoopInfo *LI) {
1060 assert(!UsePageAliases);
1061 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1062
1063 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
1064
// Shadow values 1..15 are short-granule sizes; anything above 15 is a real
// tag, so a mismatch there is an outright failure.
1065 IRBuilder<> IRB(TCI.TagMismatchTerm);
1066 Value *OutOfShortGranuleTagRange =
1067 IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
1068 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
1069 OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
1070 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
1071
// Short-granule path: the access must end within the valid prefix of the
// 16-byte granule, i.e. lowBits(ptr) + accessSize - 1 < memTag.
1072 IRB.SetInsertPoint(TCI.TagMismatchTerm);
1073 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
1074 PtrLowBits = IRB.CreateAdd(
1075 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
1076 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
1077 SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
1079 LI, CheckFailTerm->getParent());
1080
// For a short granule the real tag is stored in the granule's last byte.
1081 IRB.SetInsertPoint(TCI.TagMismatchTerm);
1082 Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
1083 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
1084 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
1085 Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
1086 SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
1088 LI, CheckFailTerm->getParent());
1089
// Failure block: per-target trap instruction with the access info encoded in
// an immediate; the faulting address is pinned to a fixed register.
1090 IRB.SetInsertPoint(CheckFailTerm);
1091 InlineAsm *Asm;
1092 switch (TargetTriple.getArch()) {
1093 case Triple::x86_64:
1094 // The signal handler will find the data address in rdi.
1096 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1097 "int3\nnopl " +
1098 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
1099 "(%rax)",
1100 "{rdi}",
1101 /*hasSideEffects=*/true);
1102 break;
1103 case Triple::aarch64:
1104 case Triple::aarch64_be:
1105 // The signal handler will find the data address in x0.
1107 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1108 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1109 "{x0}",
1110 /*hasSideEffects=*/true);
1111 break;
1112 case Triple::riscv64:
1113 // The signal handler will find the data address in x10.
1115 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1116 "ebreak\naddiw x0, x11, " +
1117 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1118 "{x10}",
1119 /*hasSideEffects=*/true);
1120 break;
1121 default:
1122 report_fatal_error("unsupported architecture");
1123 }
1124 IRB.CreateCall(Asm, TCI.PtrLong);
// In recover mode the failure block falls through back to the access.
1125 if (Recover)
1126 cast<BranchInst>(CheckFailTerm)
1127 ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
1128}
1129
// Returns true when a mem intrinsic needs no instrumentation: a transfer is
// ignorable only if both its destination (write) and source (read) are
// ignorable; a memset only checks the destination.
// NOTE(review): the listing dropped original line 1132, presumably
// `if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {`.
1130 bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1131 MemIntrinsic *MI) {
1133 return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
1134 (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
1135 }
1136 if (isa<MemSetInst>(MI))
1137 return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
1138 return false;
1139}
1140
// Replaces a mem intrinsic with a call into the HWASan runtime's checked
// memmove/memcpy/memset, then erases the original intrinsic.
// NOTE(review): the listing dropped original lines 1144 and 1152 — the
// `SmallVector<Value *, ...> Args{` declarations heading each argument list.
1141 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1142 IRBuilder<> IRB(MI);
1143 if (isa<MemTransferInst>(MI)) {
1145 MI->getOperand(0), MI->getOperand(1),
1146 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1147
// Match-all-aware runtime variants take the match-all tag as a trailing arg.
1148 if (UseMatchAllCallback)
1149 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1150 IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1151 } else if (isa<MemSetInst>(MI)) {
1153 MI->getOperand(0),
1154 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1155 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1156 if (UseMatchAllCallback)
1157 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1158 IRB.CreateCall(HwasanMemset, Args);
1159 }
1160 MI->eraseFromParent();
1161}
1162
// Instruments one interesting memory operand; picks callback / outlined /
// inline checking for power-of-two sizes up to 16 bytes, and the sized
// runtime callback otherwise. Returns true when instrumentation was emitted.
// NOTE(review): the listing dropped original line 1204, presumably a
// `SmallVector<Value *, ...> Args{` head for the sized-callback arguments.
1163 bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1164 DomTreeUpdater &DTU, LoopInfo *LI,
1165 const DataLayout &DL) {
1166 Value *Addr = O.getPtr();
1167
1168 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1169
1170 // If the pointer is statically known to be zero, the tag check will pass
1171 // since:
1172 // 1) it has a zero tag
1173 // 2) the shadow memory corresponding to address 0 is initialized to zero and
1174 // never updated.
1175 // We can therefore elide the tag check.
1176 llvm::KnownBits Known(DL.getPointerTypeSizeInBits(Addr->getType()));
1177 llvm::computeKnownBits(Addr, Known, DL);
1178 if (Known.isZero())
1179 return false;
1180
1181 if (O.MaybeMask)
1182 return false; // FIXME
1183
// Fast path applies when the size fits one of the fixed callbacks and the
// access cannot straddle a granule (sufficient alignment or small size).
1184 IRBuilder<> IRB(O.getInsn());
1185 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1186 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1187 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1188 *O.Alignment >= O.TypeStoreSize / 8)) {
1189 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1190 if (InstrumentWithCalls) {
1191 SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
1192 if (UseMatchAllCallback)
1193 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1194 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1195 Args);
1196 } else if (OutlinedChecks) {
1197 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1198 DTU, LI);
1199 } else {
1200 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1201 DTU, LI);
1202 }
1203 } else {
// Slow path: sized runtime callback taking (address, size-in-bytes).
1205 IRB.CreatePointerCast(Addr, IntptrTy),
1206 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1207 ConstantInt::get(IntptrTy, 8))};
1208 if (UseMatchAllCallback)
1209 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1210 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1211 }
1212 untagPointerOperand(O.getInsn(), Addr);
1213
1214 return true;
1215}
1216
// Writes `Tag` into the shadow covering `AI`'s first `Size` bytes. With short
// granules enabled, a partial last granule stores the remainder length in
// shadow and the real tag in the granule's final byte.
1217 void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1218 size_t Size) {
1219 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1220 if (!UseShortGranules)
1221 Size = AlignedSize;
1222
1223 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1224 if (InstrumentWithCalls) {
1225 IRB.CreateCall(HwasanTagMemoryFunc,
1226 {IRB.CreatePointerCast(AI, PtrTy), Tag,
1227 ConstantInt::get(IntptrTy, AlignedSize)});
1228 } else {
1229 size_t ShadowSize = Size >> Mapping.scale();
1230 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1231 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1232 // If this memset is not inlined, it will be intercepted in the hwasan
1233 // runtime library. That's OK, because the interceptor skips the checks if
1234 // the address is in the shadow region.
1235 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1236 // llvm.memset right here into either a sequence of stores, or a call to
1237 // hwasan_tag_memory.
1238 if (ShadowSize)
1239 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
1240 if (Size != AlignedSize) {
// Short granule: shadow byte holds the in-use byte count of the last
// granule, and the granule's last byte holds the actual tag.
1241 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1242 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1243 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1244 IRB.CreateStore(
1245 Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1246 AlignedSize - 1));
1247 }
1248 }
1249}
1250
1251unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1252 if (TargetTriple.getArch() == Triple::x86_64)
1253 return AllocaNo & TagMaskByte;
1254
1255 // A list of 8-bit numbers that have at most one run of non-zero bits.
1256 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1257 // masks.
1258 // The list does not include the value 255, which is used for UAR.
1259 //
1260 // Because we are more likely to use earlier elements of this list than later
1261 // ones, it is sorted in increasing order of probability of collision with a
1262 // mask allocated (temporally) nearby. The program that generated this list
1263 // can be found at:
1264 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1265 static const unsigned FastMasks[] = {
1266 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1267 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1268 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1269 return FastMasks[AllocaNo % std::size(FastMasks)];
1270}
1271
1272Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1273 if (TagMaskByte == 0xFF)
1274 return OldTag; // No need to clear the tag byte.
1275 return IRB.CreateAnd(OldTag,
1276 ConstantInt::get(OldTag->getType(), TagMaskByte));
1277}
1278
1279Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1280 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1281}
1282
// Produces (and caches) the per-frame base tag derived from frame-pointer
// entropy; a null return tells callers to generate tags via runtime calls.
// NOTE(review): the listing dropped original line 1284 — the guard before
// `return nullptr;` (presumably `if (ClGenerateTagsWithCalls)`).
1283 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1285 return nullptr;
1286 if (StackBaseTag)
1287 return StackBaseTag;
1288 // Extract some entropy from the stack pointer for the tags.
1289 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1290 // between functions).
1291 Value *FramePointerLong = getCachedFP(IRB);
1292 Value *StackTag =
1293 applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
1294 IRB.CreateLShr(FramePointerLong, 20)));
1295 StackTag->setName("hwasan.stack.base.tag");
1296 return StackTag;
1297}
1298
// Derives the tag for the AllocaNo-th alloca by xoring the frame's base tag
// with a per-alloca retag mask.
// NOTE(review): the listing dropped original line 1301 — the guard before
// the `getNextTagWithCall` return (presumably `if (ClGenerateTagsWithCalls)`).
1299 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1300 unsigned AllocaNo) {
1302 return getNextTagWithCall(IRB);
1303 return IRB.CreateXor(
1304 StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1305}
1306
1307Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1308 Value *FramePointerLong = getCachedFP(IRB);
1309 Value *UARTag =
1310 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1311
1312 UARTag->setName("hwasan.uar.tag");
1313 return UARTag;
1314}
1315
1316// Add a tag to an address.
1317Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1318 Value *PtrLong, Value *Tag) {
1319 assert(!UsePageAliases);
1320 Value *TaggedPtrLong;
1321 if (CompileKernel) {
1322 // Kernel addresses have 0xFF in the most significant byte.
1323 Value *ShiftedTag =
1324 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1325 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1326 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1327 } else {
1328 // Userspace can simply do OR (tag << PointerTagShift);
1329 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1330 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1331 }
1332 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1333}
1334
1335// Remove tag from an address.
1336Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1337 assert(!UsePageAliases);
1338 Value *UntaggedPtrLong;
1339 if (CompileKernel) {
1340 // Kernel addresses have 0xFF in the most significant byte.
1341 UntaggedPtrLong =
1342 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1343 TagMaskByte << PointerTagShift));
1344 } else {
1345 // Userspace addresses have 0x00.
1346 UntaggedPtrLong = IRB.CreateAnd(
1347 PtrLong, ConstantInt::get(PtrLong->getType(),
1348 ~(TagMaskByte << PointerTagShift)));
1349 }
1350 return UntaggedPtrLong;
1351}
1352
1353Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1354 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1355 // in Bionic's libc/platform/bionic/tls_defines.h.
1356 constexpr int SanitizerSlot = 6;
1357 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1358 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1359 return ThreadPtrGlobal;
1360}
1361
1362Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1363 if (!CachedFP)
1364 CachedFP = memtag::getFP(IRB);
1365 return CachedFP;
1366}
1367
1368Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1369 // Prepare ring buffer data.
1370 Value *PC = memtag::getPC(TargetTriple, IRB);
1371 Value *FP = getCachedFP(IRB);
1372
1373 // Mix FP and PC.
1374 // Assumptions:
1375 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1376 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1377 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1378 // 0xFFFFPPPPPPPPPPPP
1379 //
1380 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1381 // prefer FP-relative offsets for functions compiled with HWASan.
1382 FP = IRB.CreateShl(FP, 44);
1383 return IRB.CreateOr(PC, FP);
1384}
1385
// Emits the per-function prologue: resolves the shadow base (fixed, ifunc, or
// TLS-derived) and, when requested, pushes a frame record (mixed PC/FP) into
// the per-thread ring buffer.
// NOTE(review): the listing dropped original line 1434 — the
// `llvm_unreachable(` head for the `case none:` message.
1386 void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1387 if (!Mapping.isInTls())
1388 ShadowBase = getShadowNonTls(IRB);
1389 else if (!WithFrameRecord && TargetTriple.isAndroid())
1390 ShadowBase = getDynamicShadowIfunc(IRB);
1391
1392 if (!WithFrameRecord && ShadowBase)
1393 return;
1394
// Lazily load the sanitizer TLS slot; shared by the frame-record and the
// shadow-base computations below.
1395 Value *SlotPtr = nullptr;
1396 Value *ThreadLong = nullptr;
1397 Value *ThreadLongMaybeUntagged = nullptr;
1398
1399 auto getThreadLongMaybeUntagged = [&]() {
1400 if (!SlotPtr)
1401 SlotPtr = getHwasanThreadSlotPtr(IRB);
1402 if (!ThreadLong)
1403 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1404 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1405 // TBI.
1406 return TargetTriple.isAArch64() ? ThreadLong
1407 : untagPointer(IRB, ThreadLong);
1408 };
1409
1410 if (WithFrameRecord) {
1411 switch (ClRecordStackHistory) {
1412 case libcall: {
1413 // Emit a runtime call into hwasan rather than emitting instructions for
1414 // recording stack history.
1415 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1416 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1417 break;
1418 }
1419 case instr: {
1420 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1421
// The ring-buffer pointer doubles as the stack base tag source.
1422 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1423
1424 // Store data to ring buffer.
1425 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1426 Value *RecordPtr =
1427 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1428 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1429
1430 IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
1431 break;
1432 }
1433 case none: {
1435 "A stack history recording mode should've been selected.");
1436 }
1437 }
1438 }
1439
1440 if (!ShadowBase) {
1441 if (!ThreadLongMaybeUntagged)
1442 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1443
1444 // Get shadow base address by aligning RecordPtr up.
1445 // Note: this is not correct if the pointer is already aligned.
1446 // Runtime library will make sure this never happens.
1447 ShadowBase = IRB.CreateAdd(
1448 IRB.CreateOr(
1449 ThreadLongMaybeUntagged,
1450 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1451 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1452 ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1453 }
1454}
1455
// After each landing pad, calls the runtime's vfork handler so stack memory
// reused across the unwind gets retagged correctly.
// NOTE(review): the listing dropped original line 1462 — the argument head,
// presumably `{memtag::readRegister(` for the stack-pointer register read.
1456 void HWAddressSanitizer::instrumentLandingPads(
1457 SmallVectorImpl<Instruction *> &LandingPadVec) {
1458 for (auto *LP : LandingPadVec) {
1459 IRBuilder<> IRB(LP->getNextNode());
1460 IRB.CreateCall(
1461 HwasanHandleVfork,
// Stack pointer register name differs per target: rsp on x86-64, sp elsewhere.
1463 IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
1464 }
1465}
1466
// Tags every interesting alloca: rewrites its uses to a tagged pointer,
// writes the tag into shadow at lifetime start (or function entry), and
// restores the UAR tag at lifetime end / returns.
1467 void HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1468 Value *StackTag, Value *UARTag,
1469 const DominatorTree &DT,
1470 const PostDominatorTree &PDT,
1471 const LoopInfo &LI) {
1472 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1473 // alloca addresses using that. Unfortunately, offsets are not known yet
1474 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1475 // temp, shift-OR it into each alloca address and xor with the retag mask.
1476 // This generates one extra instruction per alloca use.
1477 unsigned int I = 0;
1478
1479 for (auto &KV : SInfo.AllocasToInstrument) {
1480 auto N = I++;
1481 auto *AI = KV.first;
1482 memtag::AllocaInfo &Info = KV.second;
1483 IRBuilder<> IRB(AI->getNextNode());
1484
1485 // Replace uses of the alloca with tagged address.
1486 Value *Tag = getAllocaTag(IRB, StackTag, N);
1487 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1488 Value *AINoTagLong = untagPointer(IRB, AILong);
1489 Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1490 std::string Name =
1491 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1492 Replacement->setName(Name + ".hwasan");
1493
1494 size_t Size = memtag::getAllocaSizeInBytes(*AI);
1495 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1496
// Keep the untagged pointer for the cast chain above and for lifetime
// intrinsics, which must see the raw alloca.
1497 AI->replaceUsesWithIf(Replacement, [AILong](const Use &U) {
1498 auto *User = U.getUser();
1499 return User != AILong && !isa<LifetimeIntrinsic>(User);
1500 });
1501
1502 memtag::annotateDebugRecords(Info, retagMask(N));
1503
1504 auto TagStarts = [&]() {
1505 for (IntrinsicInst *Start : Info.LifetimeStart) {
1506 IRB.SetInsertPoint(Start->getNextNode());
1507 tagAlloca(IRB, AI, Tag, Size);
1508 }
1509 };
1510 auto TagEnd = [&](Instruction *Node) {
1511 IRB.SetInsertPoint(Node);
1512 // When untagging, use the `AlignedSize` because we need to set the tags
1513 // for the entire alloca to original. If we used `Size` here, we would
1514 // keep the last granule tagged, and store zero in the last byte of the
1515 // last granule, due to how short granules are implemented.
1516 tagAlloca(IRB, AI, UARTag, AlignedSize);
1517 };
1518 auto EraseLifetimes = [&]() {
1519 for (auto &II : Info.LifetimeStart)
1520 II->eraseFromParent();
1521 for (auto &II : Info.LifetimeEnd)
1522 II->eraseFromParent();
1523 };
1524 // Calls to functions that may return twice (e.g. setjmp) confuse the
1525 // postdominator analysis, and will leave us to keep memory tagged after
1526 // function return. Work around this by always untagging at every return
1527 // statement if return_twice functions are called.
1528 if (DetectUseAfterScope && !SInfo.CallsReturnTwice &&
1529 memtag::isSupportedLifetime(Info, &DT, &LI)) {
1530 TagStarts();
1531 if (!memtag::forAllReachableExits(DT, PDT, LI, Info, SInfo.RetVec,
1532 TagEnd)) {
1533 for (auto *End : Info.LifetimeEnd)
1534 End->eraseFromParent();
1535 }
1536 } else if (DetectUseAfterScope && ClStrictUseAfterScope) {
1537 // SInfo.CallsReturnTwice || !isStandardLifetime
1538 tagAlloca(IRB, AI, Tag, Size);
1539 TagStarts();
1540 for_each(Info.LifetimeEnd, TagEnd);
1541 for_each(SInfo.RetVec, TagEnd);
1542 EraseLifetimes();
1543 } else {
// No usable lifetime info: tag at entry, untag on every return.
1544 tagAlloca(IRB, AI, Tag, Size);
1545 for_each(SInfo.RetVec, TagEnd);
1546 EraseLifetimes();
1547 }
1548 memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1549 }
1550}
1551
// Emits a remark recording whether the function was skipped by selective
// instrumentation or sanitized.
// NOTE(review): the listing dropped original line 1552 — the function
// signature head (presumably `static void emitRemark(const Function &F,
// OptimizationRemarkEmitter &ORE,`).
1553 bool Skip) {
1554 if (Skip) {
1555 ORE.emit([&]() {
1556 return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
1557 << "Skipped: F=" << ore::NV("Function", &F);
1558 });
1559 } else {
1560 ORE.emit([&]() {
1561 return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
1562 << "Sanitized: F=" << ore::NV("Function", &F);
1563 });
1564 }
1565}
1566
// Decides whether to skip instrumenting `F` entirely, either because the
// profile marks it hot (above the configured percentile) or by random
// sampling via ClRandomKeepRate.
// NOTE(review): the listing dropped original lines 1568 (remaining
// parameters), 1572, 1580 (percentile-check arguments) and 1591 (likely the
// emitRemark call) — restore from the upstream file before compiling.
1567 bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
1569 auto SkipHot = [&]() {
1570 if (!ClHotPercentileCutoff.getNumOccurrences())
1571 return false;
1573 ProfileSummaryInfo *PSI =
1574 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
1575 if (!PSI || !PSI->hasProfileSummary()) {
1576 ++NumNoProfileSummaryFuncs;
1577 return false;
1578 }
1579 return PSI->isFunctionHotInCallGraphNthPercentile(
1581 };
1582
1583 auto SkipRandom = [&]() {
1584 if (!ClRandomKeepRate.getNumOccurrences())
1585 return false;
// Keep with probability ClRandomKeepRate; skip otherwise.
1586 std::bernoulli_distribution D(ClRandomKeepRate);
1587 return !D(*Rng);
1588 };
1589
1590 bool Skip = SkipRandom() || SkipHot();
1592 return Skip;
1593}
1594
// Per-function driver: collects interesting operands / allocas / landing
// pads, emits the prologue (shadow base + optional frame record), then
// instruments the stack, memory accesses and mem intrinsics.
// NOTE(review): the listing dropped original lines 1596, 1615-1616, 1628,
// 1630, 1641, 1674-1675 and 1695-1697 (parameter list, ORE/SIB declarations,
// the MemIntrinsic dyn_cast, DT/PDT lookups, and the DTU/LI setup heads) —
// restore from the upstream file before compiling.
1595 void HWAddressSanitizer::sanitizeFunction(Function &F,
1597 if (&F == HwasanCtorFunction)
1598 return;
1599
1600 // Do not apply any instrumentation for naked functions.
1601 if (F.hasFnAttribute(Attribute::Naked))
1602 return;
1603
1604 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1605 return;
1606
1607 if (F.empty())
1608 return;
1609
1610 if (F.isPresplitCoroutine())
1611 return;
1612
1613 NumTotalFuncs++;
1614
1617
1618 if (selectiveInstrumentationShouldSkip(F, FAM))
1619 return;
1620
1621 NumInstrumentedFuncs++;
1622
1623 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1624
// Single pass over all instructions to gather everything to instrument.
1625 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1626 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1627 SmallVector<Instruction *, 8> LandingPadVec;
1629
1631 for (auto &Inst : instructions(F)) {
1632 if (InstrumentStack) {
1633 SIB.visit(ORE, Inst);
1634 }
1635
1636 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1637 LandingPadVec.push_back(&Inst);
1638
1639 getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
1640
1642 if (!ignoreMemIntrinsic(ORE, MI))
1643 IntrinToInstrument.push_back(MI);
1644 }
1645
1646 memtag::StackInfo &SInfo = SIB.get();
1647
1648 initializeCallbacks(*F.getParent());
1649
1650 if (!LandingPadVec.empty())
1651 instrumentLandingPads(LandingPadVec);
1652
1653 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1654 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1655 // __hwasan_personality_thunk is a no-op for functions without an
1656 // instrumented stack, so we can drop it.
1657 F.setPersonalityFn(nullptr);
1658 }
1659
1660 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1661 IntrinToInstrument.empty())
1662 return;
1663
1664 assert(!ShadowBase);
1665
// Prologue goes at the very top of the entry block.
1666 BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1667 IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1668 emitPrologue(EntryIRB,
1669 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1670 Mapping.withFrameRecord() &&
1671 !SInfo.AllocasToInstrument.empty());
1672
1673 if (!SInfo.AllocasToInstrument.empty()) {
1676 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1677 Value *StackTag = getStackBaseTag(EntryIRB);
1678 Value *UARTag = getUARTag(EntryIRB);
1679 instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1680 }
1681
1682 // If we split the entry block, move any allocas that were originally in the
1683 // entry block back into the entry block so that they aren't treated as
1684 // dynamic allocas.
1685 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1686 InsertPt = F.getEntryBlock().begin();
1687 for (Instruction &I :
1688 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1689 if (auto *AI = dyn_cast<AllocaInst>(&I))
1690 if (isa<ConstantInt>(AI->getArraySize()))
1691 I.moveBefore(F.getEntryBlock(), InsertPt);
1692 }
1693 }
1694
// Lazy DTU batches CFG updates from the per-access block splits.
1698 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1699 const DataLayout &DL = F.getDataLayout();
1700 for (auto &Operand : OperandsToInstrument)
1701 instrumentMemAccess(Operand, DTU, LI, DL);
1702 DTU.flush();
1703
1704 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1705 for (auto *Inst : IntrinToInstrument)
1706 instrumentMemIntrinsic(Inst);
1707 }
1708
// Per-function state must not leak into the next function.
1709 ShadowBase = nullptr;
1710 StackBaseTag = nullptr;
1711 CachedFP = nullptr;
1712}
1713
// Replaces `GV` with a padded, tagged clone plus hwasan_globals descriptors,
// and an alias whose value carries the tag in the pointer's top byte.
// NOTE(review): the listing dropped original lines 1725 (padding constant),
// 1762-1763, 1774 and 1778-1779 (ConstantExpr chains for the descriptor and
// alias) — restore from the upstream file before compiling.
1714 void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1715 assert(!UsePageAliases);
1716 Constant *Initializer = GV->getInitializer();
1717 uint64_t SizeInBytes =
1718 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1719 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1720 if (SizeInBytes != NewSize) {
1721 // Pad the initializer out to the next multiple of 16 bytes and add the
1722 // required short granule tag.
1723 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1724 Init.back() = Tag;
1726 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1727 }
1728
1729 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1730 GlobalValue::ExternalLinkage, Initializer,
1731 GV->getName() + ".hwasan");
1732 NewGV->copyAttributesFrom(GV);
1733 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1734 NewGV->copyMetadata(GV, 0);
1735 NewGV->setAlignment(
1736 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1737
1738 // It is invalid to ICF two globals that have different tags. In the case
1739 // where the size of the global is a multiple of the tag granularity the
1740 // contents of the globals may be the same but the tags (i.e. symbol values)
1741 // may be different, and the symbols are not considered during ICF. In the
1742 // case where the size is not a multiple of the granularity, the short granule
1743 // tags would discriminate two globals with different tags, but there would
1744 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1745 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1746 // granule tag in the last byte.
1747 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1748
1749 // Descriptor format (assuming little-endian):
1750 // bytes 0-3: relative address of global
1751 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1752 // it isn't, we create multiple descriptors)
1753 // byte 7: tag
1754 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1755 const uint64_t MaxDescriptorSize = 0xfffff0;
1756 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1757 DescriptorPos += MaxDescriptorSize) {
1758 auto *Descriptor =
1759 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1760 nullptr, GV->getName() + ".hwasan.descriptor");
1761 auto *GVRelPtr = ConstantExpr::getTrunc(
1764 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1765 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1766 ConstantInt::get(Int64Ty, DescriptorPos)),
1767 Int32Ty);
1768 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1769 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1770 Descriptor->setComdat(NewGV->getComdat());
1771 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1772 Descriptor->setSection("hwasan_globals");
1773 Descriptor->setMetadata(LLVMContext::MD_associated,
1775 appendToCompilerUsed(M, Descriptor);
1776 }
1777
// The alias takes over the original name so all existing references resolve
// to the tagged address.
1780 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1781 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1782 GV->getType());
1783 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1784 GV->getLinkage(), "", Aliasee, &M);
1785 Alias->setVisibility(GV->getVisibility());
1786 Alias->takeName(GV);
1787 GV->replaceAllUsesWith(Alias);
1788 GV->eraseFromParent();
1789}
1790
// Selects eligible module globals and assigns each a tag (seeded from an MD5
// of the source file name for build determinism), then instruments them.
// NOTE(review): the listing dropped original line 1794 — the first filter
// condition inside the loop.
1791 void HWAddressSanitizer::instrumentGlobals() {
1792 std::vector<GlobalVariable *> Globals;
1793 for (GlobalVariable &GV : M.globals()) {
1795 continue;
1796
1797 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1798 GV.isThreadLocal())
1799 continue;
1800
1801 // Common symbols can't have aliases point to them, so they can't be tagged.
1802 if (GV.hasCommonLinkage())
1803 continue;
1804
1805 if (ClAllGlobals) {
1806 // Avoid instrumenting intrinsic global variables.
1807 if (GV.getSection() == "llvm.metadata")
1808 continue;
1809 } else {
1810 // Globals with custom sections may be used in __start_/__stop_
1811 // enumeration, which would be broken both by adding tags and potentially
1812 // by the extra padding/alignment that we insert.
1813 if (GV.hasSection())
1814 continue;
1815 }
1816
1817 Globals.push_back(&GV);
1818 }
1819
// Deterministic per-module tag seed: hash of the source file name.
1820 MD5 Hasher;
1821 Hasher.update(M.getSourceFileName());
1822 MD5::MD5Result Hash;
1823 Hasher.final(Hash);
1824 uint8_t Tag = Hash[0];
1825
1826 assert(TagMaskByte >= 16);
1827
1828 for (GlobalVariable *GV : Globals) {
1829 // Don't allow globals to be tagged with something that looks like a
1830 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1831 // the fast path shadow-vs-address check succeeds.
1832 if (Tag < 16 || Tag > TagMaskByte)
1833 Tag = 16;
1834 instrumentGlobal(GV, Tag++);
1835 }
1836}
1837
1838void HWAddressSanitizer::instrumentPersonalityFunctions() {
1839 // We need to untag stack frames as we unwind past them. That is the job of
1840 // the personality function wrapper, which either wraps an existing
1841 // personality function or acts as a personality function on its own. Each
1842 // function that has a personality function or that can be unwound past has
1843 // its personality function changed to a thunk that calls the personality
1844 // function wrapper in the runtime.
// NOTE(review): the declaration of `PersonalityFns` (original line 1845) was
// lost in extraction; from its uses below it maps a personality-function
// constant (or nullptr for "no personality") to the list of functions that
// should use the corresponding thunk — confirm against upstream.
1846 for (Function &F : M) {
// Only sanitized function definitions are rewritten; declarations have no
// frames of their own to untag.
1847 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1848 continue;
1849
1850 if (F.hasPersonalityFn()) {
// Group by the underlying personality so one thunk is shared per distinct
// personality function.
1851 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1852 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
// Functions that can be unwound past but declare no personality of their
// own are grouped under the null key and get the bare wrapper.
1853 PersonalityFns[nullptr].push_back(&F);
1854 }
1855 }
1856
1857 if (PersonalityFns.empty())
1858 return;
1859
1860 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1861 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1862 PtrTy, PtrTy, PtrTy, PtrTy);
// The unwinder accessors are only ever passed to the wrapper as opaque
// function pointers, so the (inaccurate) void() prototype used here is
// harmless.
1863 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1864 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1865
// Emit one thunk per distinct personality (including the null group).
1866 for (auto &P : PersonalityFns) {
1867 std::string ThunkName = kHwasanPersonalityThunkName;
1868 if (P.first)
1869 ThunkName += ("." + P.first->getName()).str();
1870 FunctionType *ThunkFnTy = FunctionType::get(
1871 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
// A thunk wrapping a local (internal-linkage) personality must itself stay
// local to this module; thunks for external personalities are deduplicated
// across TUs via comdat below.
1872 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1873 cast<GlobalValue>(P.first)->hasLocalLinkage());
// NOTE(review): the linkage/address-space arguments to Function::Create
// (original lines 1875-1876) were lost in extraction; presumably the
// linkage is chosen based on IsLocal — confirm against upstream.
1874 auto *ThunkFn = Function::Create(ThunkFnTy,
1877 ThunkName, &M);
1878 // TODO: think about other attributes as well.
// Propagate BTI so the thunk remains a valid indirect-branch target when
// any of its users require branch-target enforcement.
1879 if (any_of(P.second, [](const Function *F) {
1880 return F->hasFnAttribute("branch-target-enforcement");
1881 })) {
1882 ThunkFn->addFnAttr("branch-target-enforcement");
1883 }
1884 if (!IsLocal) {
1885 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1886 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1887 }
1888
// Thunk body: tail-call the runtime wrapper, forwarding the five standard
// personality arguments plus the original personality (or null) and the
// unwinder accessor functions.
1889 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1890 IRBuilder<> IRB(BB);
1891 CallInst *WrapperCall = IRB.CreateCall(
1892 HwasanPersonalityWrapper,
1893 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1894 ThunkFn->getArg(3), ThunkFn->getArg(4),
1895 P.first ? P.first : Constant::getNullValue(PtrTy),
1896 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1897 WrapperCall->setTailCall();
1898 IRB.CreateRet(WrapperCall);
1899
// Finally, point every function in this group at the new thunk.
1900 for (Function *F : P.second)
1901 F->setPersonalityFn(ThunkFn);
1902 }
1903}
1904
1905void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1906 bool InstrumentWithCalls,
1907 bool CompileKernel) {
1908 // Start with defaults.
1909 Scale = kDefaultShadowScale;
1910 Kind = OffsetKind::kTls;
1911 WithFrameRecord = true;
1912
1913 // Tune for the target.
1914 if (TargetTriple.isOSFuchsia()) {
1915 // Fuchsia is always PIE, which means that the beginning of the address
1916 // space is always available.
1917 Kind = OffsetKind::kGlobal;
1918 } else if (CompileKernel || InstrumentWithCalls) {
1919 SetFixed(0);
1920 WithFrameRecord = false;
1921 }
1922
1923 WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);
1924
1925 // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
1926 Kind = optOr(ClMappingOffsetDynamic, Kind);
1927 if (ClMappingOffset.getNumOccurrences() > 0 &&
1928 !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
1929 ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
1930 SetFixed(ClMappingOffset);
1931 }
1932}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define clEnumVal(ENUMVAL, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains constants used for implementing Dwarf debug support.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden)
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< bool > ClFrameRecords("hwasan-with-frame-record", cl::desc("Use ring buffer for stack allocations"), cl::Hidden)
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< float > ClRandomKeepRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function. " "Note: instrumentation can be skipped randomly " "OR because of the hot percentile cutoff, if " "both are supplied."))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClStrictUseAfterScope("hwasan-strict-use-after-scope", cl::desc("for complicated lifetimes, tag both on end and return"), cl::Hidden, cl::init(true))
static cl::opt< OffsetKind > ClMappingOffsetDynamic("hwasan-mapping-offset-dynamic", cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden, cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"), clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"), clEnumValN(OffsetKind::kTls, "tls", "Use TLS")))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
static cl::opt< bool > ClAllGlobals("hwasan-all-globals", cl::desc("Instrument globals, even those within user-defined sections. Warning: " "This may break existing code which walks globals via linker-generated " "symbols, expects certain globals to be contiguous with each other, or " "makes other assumptions which are invalidated by HWASan " "instrumentation."), cl::Hidden, cl::init(false))
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClStaticLinking("hwasan-static-linking", cl::desc("Don't use .note.hwasan.globals section to instrument globals " "from loadable libraries. " "Note: in static binaries, the global variables section can be " "accessed directly via linker-provided " "__start_hwasan_globals and __stop_hwasan_globals symbols"), cl::Hidden, cl::init(false))
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cutoff."))
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
#define T
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
an instruction to allocate memory on the stack
PointerType * getType() const
Overload to return most specific pointer type.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition Constants.h:720
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:491
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Class to represent function types.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:613
StringRef getSection() const
Get the custom section of this global if it has one.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:215
bool hasSection() const
Check if this global has a custom object file section.
LLVM_ABI const SanitizerMetadata & getSanitizerMetadata() const
Definition Globals.cpp:246
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
VisibilityTypes getVisibility() const
LinkageTypes getLinkage() const
bool isDeclarationForLinker() const
bool hasSanitizerMetadata() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
bool hasCommonLinkage() const
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition GlobalValue.h:56
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:530
Analysis pass providing a never-invalidated alias analysis result.
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition IRBuilder.h:1957
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2223
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2171
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1516
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:561
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2025
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition IRBuilder.h:1175
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1457
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2312
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2316
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1854
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1495
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Definition IRBuilder.h:629
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2054
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1554
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1867
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1406
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2487
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2040
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition IRBuilder.h:604
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2320
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2249
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1535
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1602
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1576
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
LLVM_ABI void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
LLVM_ABI void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition MD5.cpp:188
LLVM_ABI void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition MD5.cpp:233
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition MDBuilder.cpp:48
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
bool empty() const
Definition MapVector.h:77
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
GlobalVariable * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition Module.cpp:262
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:222
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:413
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isAndroidVersionLT(unsigned Major) const
Definition Triple.h:861
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:859
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:420
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition Triple.h:1130
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition Triple.h:1048
bool isOSFuchsia() const
Definition Triple.h:672
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:803
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVM_ABI bool isSwiftError() const
Return true if this value is a swifterror value.
Definition Value.cpp:1129
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:561
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
int getNumOccurrences() const
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition ELF.h:1803
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Value * getFP(IRBuilder<> &IRB)
bool isSupportedLifetime(const AllocaInfo &AInfo, const DominatorTree *DT, const LoopInfo *LI)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
Value * incrementThreadLong(IRBuilder<> &IRB, Value *ThreadLong, unsigned int Inc, bool IsMemtagDarwin=false)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const AllocaInfo &AInfo, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:634
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
@ Other
Any other memory.
Definition ModRef.h:68
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition Local.cpp:3895
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
std::string itostr(int64_t X)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 8 > RetVec