LLVM 20.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/MD5.h"
66#include <optional>
67#include <random>
68
69using namespace llvm;
70
71#define DEBUG_TYPE "hwasan"
72
73const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
74const char kHwasanNoteName[] = "hwasan.note";
75const char kHwasanInitName[] = "__hwasan_init";
76const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
77
79 "__hwasan_shadow_memory_dynamic_address";
80
81// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
82static const size_t kNumberOfAccessSizes = 5;
83
84static const size_t kDefaultShadowScale = 4;
85
86static const unsigned kShadowBaseAlignment = 32;
87
namespace {
// How the shadow-base offset is obtained at runtime; see the ShadowMapping
// doc comment in HWAddressSanitizer for the full mapping formulas.
enum class OffsetKind {
  kFixed = 0, // Constant offset baked into the generated code.
  kGlobal,    // Loaded from __hwasan_shadow_memory_dynamic_address.
  kIfunc,     // Address of the __hwasan_shadow ifunc-resolved global.
  kTls,       // Derived from the __hwasan_tls thread-local slot.
};
}
96
98 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
99 cl::desc("Prefix for memory access callbacks"),
100 cl::Hidden, cl::init("__hwasan_"));
101
103 "hwasan-kernel-mem-intrinsic-prefix",
104 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
105 cl::init(false));
106
108 "hwasan-instrument-with-calls",
109 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
110 cl::init(false));
111
112static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
113 cl::desc("instrument read instructions"),
114 cl::Hidden, cl::init(true));
115
116static cl::opt<bool>
117 ClInstrumentWrites("hwasan-instrument-writes",
118 cl::desc("instrument write instructions"), cl::Hidden,
119 cl::init(true));
120
122 "hwasan-instrument-atomics",
123 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
124 cl::init(true));
125
126static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
127 cl::desc("instrument byval arguments"),
128 cl::Hidden, cl::init(true));
129
130static cl::opt<bool>
131 ClRecover("hwasan-recover",
132 cl::desc("Enable recovery mode (continue-after-error)."),
133 cl::Hidden, cl::init(false));
134
135static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
136 cl::desc("instrument stack (allocas)"),
137 cl::Hidden, cl::init(true));
138
139static cl::opt<bool>
140 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
141 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
143
145 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
147 cl::desc("How many lifetime ends to handle for a single alloca."),
149
150static cl::opt<bool>
151 ClUseAfterScope("hwasan-use-after-scope",
152 cl::desc("detect use after scope within function"),
153 cl::Hidden, cl::init(true));
154
156 "hwasan-generate-tags-with-calls",
157 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
158 cl::init(false));
159
160static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
161 cl::Hidden, cl::init(false));
162
164 "hwasan-match-all-tag",
165 cl::desc("don't report bad accesses via pointers with this tag"),
166 cl::Hidden, cl::init(-1));
167
168static cl::opt<bool>
169 ClEnableKhwasan("hwasan-kernel",
170 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
171 cl::Hidden, cl::init(false));
172
173// These flags allow to change the shadow mapping and control how shadow memory
174// is accessed. The shadow mapping looks like:
175// Shadow = (Mem >> scale) + offset
176
178 ClMappingOffset("hwasan-mapping-offset",
179 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
180 cl::Hidden);
181
183 "hwasan-mapping-offset-dynamic",
184 cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
185 cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
186 clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
187 clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));
188
189static cl::opt<bool>
190 ClFrameRecords("hwasan-with-frame-record",
191 cl::desc("Use ring buffer for stack allocations"),
192 cl::Hidden);
193
194static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
195 cl::desc("Hot percentile cuttoff."));
196
197static cl::opt<float>
198 ClRandomSkipRate("hwasan-random-rate",
199 cl::desc("Probability value in the range [0.0, 1.0] "
200 "to keep instrumentation of a function."));
201
202STATISTIC(NumTotalFuncs, "Number of total funcs");
203STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
204STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
205
206// Mode for selecting how to insert frame record info into the stack ring
207// buffer.
209 // Do not record frame record info.
211
212 // Insert instructions into the prologue for storing into the stack ring
213 // buffer directly.
215
216 // Add a call to __hwasan_add_frame_record in the runtime.
218};
219
221 "hwasan-record-stack-history",
222 cl::desc("Record stack frames with tagged allocations in a thread-local "
223 "ring buffer"),
224 cl::values(clEnumVal(none, "Do not record stack ring history"),
225 clEnumVal(instr, "Insert instructions into the prologue for "
226 "storing into the stack ring buffer directly"),
227 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
228 "storing into the stack ring buffer")),
230
231static cl::opt<bool>
232 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
233 cl::desc("instrument memory intrinsics"),
234 cl::Hidden, cl::init(true));
235
236static cl::opt<bool>
237 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
238 cl::desc("instrument landing pads"), cl::Hidden,
239 cl::init(false));
240
242 "hwasan-use-short-granules",
243 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
244 cl::init(false));
245
247 "hwasan-instrument-personality-functions",
248 cl::desc("instrument personality functions"), cl::Hidden);
249
250static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
251 cl::desc("inline all checks"),
252 cl::Hidden, cl::init(false));
253
254static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
255 cl::desc("inline all checks"),
256 cl::Hidden, cl::init(false));
257
258// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
259static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
260 cl::desc("Use page aliasing in HWASan"),
261 cl::Hidden, cl::init(false));
262
263namespace {
264
265template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
266 return Opt.getNumOccurrences() ? Opt : Other;
267}
268
269bool shouldUsePageAliases(const Triple &TargetTriple) {
270 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
271}
272
273bool shouldInstrumentStack(const Triple &TargetTriple) {
274 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
275}
276
277bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
278 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
279}
280
281bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
282 return optOr(ClUseStackSafety, !DisableOptimization);
283}
284
285bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
286 bool DisableOptimization) {
287 return shouldInstrumentStack(TargetTriple) &&
288 mightUseStackSafetyAnalysis(DisableOptimization);
289}
290
291bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
292 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
293}
294
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  // Command-line flags (-hwasan-recover, -hwasan-kernel) override the
  // constructor arguments; the RNG is only created when random-rate
  // (selective) instrumentation was requested on the command line.
  HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
                     const StackSafetyGlobalInfo *SSI)
      : M(M), SSI(SSI) {
    this->Recover = optOr(ClRecover, Recover);
    this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
    this->Rng = ClRandomSkipRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
                                                     : nullptr;

    initializeModule();
  }

  void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);

private:
  // Values produced when a shadow-tag check is inserted: the pointer in
  // integer and untagged forms, both tags, and the terminator of the
  // mismatch (slow-path) block.
  struct ShadowTagCheckInfo {
    Instruction *TagMismatchTerm = nullptr;
    Value *PtrLong = nullptr;
    Value *AddrLong = nullptr;
    Value *PtrTag = nullptr;
    Value *MemTag = nullptr;
  };

  // NOTE(review): the following declaration is truncated in this excerpt
  // (its remaining parameters are not visible here).
  bool selectiveInstrumentationShouldSkip(Function &F,
  void initializeModule();
  void createHwasanCtorComdat();
  void removeFnAttributes(Function *F);

  void initializeCallbacks(Module &M);

  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);

  int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
  ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                          DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                  unsigned AccessSizeIndex,
                                  Instruction *InsertBefore,
                                  DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore, DomTreeUpdater &DTU,
                                 LoopInfo *LI);
  bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
                           LoopInfo *LI);
  bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
  bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
                    Value *Ptr);

  // NOTE(review): this declaration is also truncated in this excerpt; only
  // one continuation line of its parameter list is visible.
                    const TargetLibraryInfo &TLI,

  void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
                       const DominatorTree &DT, const PostDominatorTree &PDT,
                       const LoopInfo &LI);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
  Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
  unsigned retagMask(unsigned AllocaNo);

  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
  void instrumentGlobals();

  Value *getCachedFP(IRBuilder<> &IRB);
  Value *getFrameRecordInfo(IRBuilder<> &IRB);

  void instrumentPersonalityFunctions();

  LLVMContext *C;
  Module &M;
  const StackSafetyGlobalInfo *SSI;
  Triple TargetTriple;
  std::unique_ptr<RandomNumberGenerator> Rng;

  /// This struct defines the shadow mapping using the rule:
  /// If `kFixed`, then
  ///   shadow = (mem >> Scale) + Offset.
  /// If `kGlobal`, then
  ///   extern char* __hwasan_shadow_memory_dynamic_address;
  ///   shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
  /// If `kIfunc`, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If `kTls`, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  ///
  /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
  /// ring buffer for storing stack allocations on targets that support it.
  class ShadowMapping {
    OffsetKind Kind;
    // NOTE(review): the Offset member declaration is missing from this
    // excerpt (it is referenced by SetFixed/offset below).
    uint8_t Scale;
    bool WithFrameRecord;

    void SetFixed(uint64_t O) {
      Kind = OffsetKind::kFixed;
      Offset = O;
    }

  public:
    void init(Triple &TargetTriple, bool InstrumentWithCalls);
    Align getObjectAlignment() const { return Align(1ULL << Scale); }
    bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
    bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
    bool isInTls() const { return Kind == OffsetKind::kTls; }
    bool isFixed() const { return Kind == OffsetKind::kFixed; }
    uint8_t scale() const { return Scale; };
    uint64_t offset() const {
      assert(isFixed()); // Only the kFixed kind has a meaningful Offset.
      return Offset;
    };
    bool withFrameRecord() const { return WithFrameRecord; };
  };

  ShadowMapping Mapping;

  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int64Ty = Type::getInt64Ty(M.getContext());

  bool CompileKernel;
  bool Recover;
  bool OutlinedChecks;
  bool InlineFastPath;
  bool UseShortGranules;
  bool InstrumentLandingPads;
  bool InstrumentWithCalls;
  bool InstrumentStack;
  bool InstrumentGlobals;
  bool DetectUseAfterScope;
  bool UsePageAliases;
  bool UseMatchAllCallback;

  // When set, accesses through pointers carrying this tag are not reported.
  std::optional<uint8_t> MatchAllTag;

  // Bit position of the pointer tag and the per-byte tag mask; set per
  // target in initializeModule (57/0x3F on x86_64, 56/0xFF elsewhere).
  unsigned PointerTagShift;
  uint64_t TagMaskByte;

  Function *HwasanCtorFunction;

  // Runtime callbacks, resolved in initializeCallbacks. The 2-D array is
  // indexed by [IsWrite][log2(AccessSize)].
  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
  FunctionCallee HwasanHandleVfork;

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanRecordFrameRecordFunc;

  Constant *ShadowGlobal;

  // Per-function cached values; reset between sanitized functions.
  Value *ShadowBase = nullptr;
  Value *StackBaseTag = nullptr;
  Value *CachedFP = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};
480
481} // end anonymous namespace
482
  // Return early if nosanitize_hwaddress module flag is present for the module.
  if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
    return PreservedAnalyses::all();
  const StackSafetyGlobalInfo *SSI = nullptr;
  auto TargetTriple = llvm::Triple(M.getTargetTriple());
  // NOTE(review): the line assigning SSI from the analysis manager is
  // missing from this excerpt.
  if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))

  // Instrument every function in the module with a single sanitizer
  // instance so module-level state (callbacks, globals) is shared.
  HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M)
    HWASan.sanitizeFunction(F, FAM);

  // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
  // are incrementally updated throughout this pass whenever
  // SplitBlockAndInsertIfThen is called.
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
      OS, MapClassName2PassName);
  // Print the active pass options in the standard angle-bracket form,
  // e.g. "<kernel;recover>".
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.Recover)
    OS << "recover";
  OS << '>';
}
522
// Creates the hwasan module constructor (calling __hwasan_init) inside a
// comdat, plus the ELF note used by the runtime to locate instrumented
// global descriptors.
void HWAddressSanitizer::createHwasanCtorComdat() {
  std::tie(HwasanCtorFunction, std::ignore) =
          /*InitArgTypes=*/{},
          /*InitArgs=*/{},
          // This callback is invoked when the functions are created the first
          // time. Hook them into the global ctors list in that case:
          [&](Function *Ctor, FunctionCallee) {
            Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
            Ctor->setComdat(CtorComdat);
            appendToGlobalCtors(M, Ctor, 0, Ctor);
          });

  // Create a note that contains pointers to the list of global
  // descriptors. Adding a note to the output file will cause the linker to
  // create a PT_NOTE program header pointing to the note that we can use to
  // find the descriptor list starting from the program headers. A function
  // provided by the runtime initializes the shadow memory for the globals by
  // accessing the descriptor list via the note. The dynamic loader needs to
  // call this function whenever a library is loaded.
  //
  // The reason why we use a note for this instead of a more conventional
  // approach of having a global constructor pass a descriptor list pointer to
  // the runtime is because of an order of initialization problem. With
  // constructors we can encounter the following problematic scenario:
  //
  // 1) library A depends on library B and also interposes one of B's symbols
  // 2) B's constructors are called before A's (as required for correctness)
  // 3) during construction, B accesses one of its "own" globals (actually
  //    interposed by A) and triggers a HWASAN failure due to the initialization
  //    for A not having happened yet
  //
  // Even without interposition it is possible to run into similar situations in
  // cases where two libraries mutually depend on each other.
  //
  // We only need one note per binary, so put everything for the note in a
  // comdat. This needs to be a comdat with an .init_array section to prevent
  // newer versions of lld from discarding the note.
  //
  // Create the note even if we aren't instrumenting globals. This ensures that
  // binaries linked from object files with both instrumented and
  // non-instrumented globals will end up with a note, even if a comdat from an
  // object file with non-instrumented globals is selected. The note is harmless
  // if the runtime doesn't support it, since it will just be ignored.
  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);

  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
  auto *Start =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__start_hwasan_globals");
  Start->setVisibility(GlobalValue::HiddenVisibility);
  auto *Stop =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__stop_hwasan_globals");
  Stop->setVisibility(GlobalValue::HiddenVisibility);

  // Null-terminated so actually 8 bytes, which are required in order to align
  // the note properly.
  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");

  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
                                 Int32Ty, Int32Ty);
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
  Note->setSection(".note.hwasan.globals");
  Note->setComdat(NoteComdat);
  Note->setAlignment(Align(4));

  // The pointers in the note need to be relative so that the note ends up being
  // placed in rodata, which is the standard location for notes.
  auto CreateRelPtr = [&](Constant *Ptr) {
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8), // n_namesz
       ConstantInt::get(Int32Ty, 8), // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));

  // Create a zero-length global in hwasan_globals so that the linker will
  // always create start and stop symbols.
  auto *Dummy = new GlobalVariable(
      M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
  Dummy->setSection("hwasan_globals");
  Dummy->setComdat(NoteComdat);
  Dummy->setMetadata(LLVMContext::MD_associated,
  appendToCompilerUsed(M, Dummy);
}
619
620void HWAddressSanitizer::removeFnAttributes(Function *F) {
621 // Remove memory attributes that are invalid with HWASan.
622 // HWASan checks read from shadow, which invalidates memory(argmem: *)
623 // Short granule checks on function arguments read from the argument memory
624 // (last byte of the granule), which invalidates writeonly.
625 //
626 // This is not only true for sanitized functions, because AttrInfer can
627 // infer those attributes on libc functions, which is not true if those
628 // are instrumented (Android) or intercepted.
629 //
630 // We might want to model HWASan shadow memory more opaquely to get rid of
631 // this problem altogether, by hiding the shadow memory write in an
632 // intrinsic, essentially like in the AArch64StackTagging pass. But that's
633 // for another day.
634
635 // The API is weird. `onlyReadsMemory` actually means "does not write", and
636 // `onlyWritesMemory` actually means "does not read". So we reconstruct
637 // "accesses memory" && "does not read" <=> "writes".
638 bool Changed = false;
639 if (!F->doesNotAccessMemory()) {
640 bool WritesMemory = !F->onlyReadsMemory();
641 bool ReadsMemory = !F->onlyWritesMemory();
642 if ((WritesMemory && !ReadsMemory) || F->onlyAccessesArgMemory()) {
643 F->removeFnAttr(Attribute::Memory);
644 Changed = true;
645 }
646 }
647 for (Argument &A : F->args()) {
648 if (A.hasAttribute(Attribute::WriteOnly)) {
649 A.removeAttr(Attribute::WriteOnly);
650 Changed = true;
651 }
652 }
653 if (Changed) {
654 // nobuiltin makes sure later passes don't restore assumptions about
655 // the function.
656 F->addFnAttr(Attribute::NoBuiltin);
657 }
658}
659
/// Module-level initialization.
///
/// inserts a call to __hwasan_init to the module's constructor list.
void HWAddressSanitizer::initializeModule() {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  TargetTriple = Triple(M.getTargetTriple());

  // Strip attributes HWASan invalidates from every function up front.
  for (Function &F : M.functions())
    removeFnAttributes(&F);

  // x86_64 currently has two modes:
  // - Intel LAM (default)
  // - pointer aliasing (heap only)
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  UsePageAliases = shouldUsePageAliases(TargetTriple);
  InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
  InstrumentStack = shouldInstrumentStack(TargetTriple);
  DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
  // x86_64 uses a 6-bit tag at bit 57 (0x3F mask); other targets use a
  // full byte at bits 56-63.
  PointerTagShift = IsX86_64 ? 57 : 56;
  TagMaskByte = IsX86_64 ? 0x3F : 0xFF;

  Mapping.init(TargetTriple, InstrumentWithCalls);

  C = &(M.getContext());
  IRBuilder<> IRB(*C);

  HwasanCtorFunction = nullptr;

  // Older versions of Android do not have the required runtime support for
  // short granules, global or personality function instrumentation. On other
  // platforms we currently require using the latest version of the runtime.
  bool NewRuntime =
      !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);

  UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
  OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
                   TargetTriple.isOSBinFormatELF() &&
                   !optOr(ClInlineAllChecks, Recover);

  // These platforms may prefer less inlining to reduce binary size.
  InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
                                                   TargetTriple.isOSFuchsia()));

  // -1 means "no match-all tag"; the kernel always uses 0xFF by default.
  if (ClMatchAllTag.getNumOccurrences()) {
    if (ClMatchAllTag != -1) {
      MatchAllTag = ClMatchAllTag & 0xFF;
    }
  } else if (CompileKernel) {
    MatchAllTag = 0xFF;
  }
  UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();

  // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);

  InstrumentGlobals =
      !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);

  if (!CompileKernel) {
    createHwasanCtorComdat();

    if (InstrumentGlobals)
      instrumentGlobals();

    bool InstrumentPersonalityFunctions =
        optOr(ClInstrumentPersonalityFunctions, NewRuntime);
    if (InstrumentPersonalityFunctions)
      instrumentPersonalityFunctions();
  }

  // Non-Android targets reach thread state through the __hwasan_tls global.
  // NOTE(review): the GlobalVariable constructor arguments for linkage and
  // thread-local mode are missing from this excerpt.
  if (!TargetTriple.isAndroid()) {
    Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    "__hwasan_tls", nullptr,
      return GV;
    });
    ThreadPtrGlobal = cast<GlobalVariable>(C);
  }
}
742
// Declares (or finds) all runtime callbacks the pass emits calls to. The
// callback names encode the access kind ("load"/"store"), the size (a power
// of two, or "N" for the sized variant), whether a match-all tag argument is
// passed ("_match_all"), and recoverable mode ("_noabort").
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
  FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
      *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
      *HwasanMemsetFnTy;
  if (UseMatchAllCallback) {
    // Match-all variants take the tag to ignore as a trailing i8 argument.
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
  } else {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
  }

  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
        HwasanMemoryAccessCallbackSizedFnTy);

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
                                    itostr(1ULL << AccessSizeIndex) +
                                    MatchAllStr + EndingStr,
                                HwasanMemoryAccessCallbackFnTy);
    }
  }

  // KASAN mode without -hwasan-kernel-mem-intrinsic-prefix uses plain
  // memmove/memcpy/memset names.
  // NOTE(review): the ':' branch of this conditional is missing from this
  // excerpt.
  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")

  HwasanMemmove = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemcpy = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemset = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);

  HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
                                              PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  HwasanRecordFrameRecordFunc =
      M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);

  ShadowGlobal =
      M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));

  HwasanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
}
813
814Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
815 // An empty inline asm with input reg == output reg.
816 // An opaque no-op cast, basically.
817 // This prevents code bloat as a result of rematerializing trivial definitions
818 // such as constants or global addresses at every load and store.
819 InlineAsm *Asm =
820 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
821 StringRef(""), StringRef("=r,0"),
822 /*hasSideEffects=*/false);
823 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
824}
825
826Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
827 return getOpaqueNoopCast(IRB, ShadowGlobal);
828}
829
// Computes the shadow base for the non-TLS mapping kinds (fixed, ifunc or
// global); the kTls case is handled elsewhere.
Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
  // Fixed mapping: the offset is a compile-time constant, wrapped in an
  // opaque cast to keep it from being rematerialized everywhere.
  // NOTE(review): the first argument line of this call is missing from
  // this excerpt.
  if (Mapping.isFixed()) {
    return getOpaqueNoopCast(
            ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
  }

  if (Mapping.isInIfunc())
    return getDynamicShadowIfunc(IRB);

  // kGlobal: load the base from a runtime-provided global.
  // NOTE(review): the initializer of GlobalDynamicAddress is missing from
  // this excerpt.
  Value *GlobalDynamicAddress =
  return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
}
845
846bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
847 Value *Ptr) {
848 // Do not instrument accesses from different address spaces; we cannot deal
849 // with them.
850 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
851 if (PtrTy->getPointerAddressSpace() != 0)
852 return true;
853
854 // Ignore swifterror addresses.
855 // swifterror memory addresses are mem2reg promoted by instruction
856 // selection. As such they cannot have regular uses like an instrumentation
857 // function and it makes no sense to track them as memory.
858 if (Ptr->isSwiftError())
859 return true;
860
861 if (findAllocaForValue(Ptr)) {
862 if (!InstrumentStack)
863 return true;
864 if (SSI && SSI->stackAccessIsSafe(*Inst))
865 return true;
866 }
867
868 if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
869 if (!InstrumentGlobals)
870 return true;
871 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
872 }
873
874 return false;
875}
876
877bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
878 Instruction *Inst, Value *Ptr) {
879 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
880 if (Ignored) {
881 ORE.emit(
882 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
883 } else {
884 ORE.emit([&]() {
885 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
886 });
887 }
888 return Ignored;
889}
890
// Collects the memory operands of I worth instrumenting (loads, stores,
// atomics, byval call arguments), honoring the per-kind Cl* flags and the
// ignoreAccess filters.
// NOTE(review): two parameter lines of this signature are missing from
// this excerpt.
void HWAddressSanitizer::getInterestingMemoryOperands(
    const TargetLibraryInfo &TLI,
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata(LLVMContext::MD_nosanitize))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (ShadowBase == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    // byval arguments are copied by the call, so the copy itself reads the
    // argument memory and is treated as a (read) access of the whole type.
    for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
      if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
          ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
        continue;
      Type *Ty = CI->getParamByValType(ArgNo);
      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
    }
  }
}
935
// Return the operand index of the pointer operand of a memory instruction.
// NOTE(review): the function signature line (presumably
// `static unsigned getPointerOperandIndex(Instruction *I)`) was dropped by
// the extraction — verify against upstream LLVM.
937  if (LoadInst *LI = dyn_cast<LoadInst>(I))
938    return LI->getPointerOperandIndex();
939  if (StoreInst *SI = dyn_cast<StoreInst>(I))
940    return SI->getPointerOperandIndex();
941  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
942    return RMW->getPointerOperandIndex();
943  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
944    return XCHG->getPointerOperandIndex();
  // Callers only pass the four instruction kinds above; anything else is a
  // programming error.
945  report_fatal_error("Unexpected instruction");
946  return -1;
947}
948
// Map an access size in bits to its index in the callback tables: sizes are
// powers of two (1..16 bytes), so the index is log2 of the byte size.
// NOTE(review): the signature line (presumably
// `static size_t TypeSizeToSizeIndex(uint32_t TypeSize)`) and an assert on
// the result were dropped by the extraction — verify against upstream LLVM.
949  size_t Res = llvm::countr_zero(TypeSize / 8);
952  return Res;
953}
954
955void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
956 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
957 TargetTriple.isRISCV64())
958 return;
959
960 IRBuilder<> IRB(I);
961 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
962 Value *UntaggedPtr =
963 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
964 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
965}
966
967Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
968 // Mem >> Scale
969 Value *Shadow = IRB.CreateLShr(Mem, Mapping.scale());
970 if (Mapping.isFixed() && Mapping.offset() == 0)
971 return IRB.CreateIntToPtr(Shadow, PtrTy);
972 // (Mem >> Scale) + Offset
973 return IRB.CreatePtrAdd(ShadowBase, Shadow);
974}
975
976int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
977 unsigned AccessSizeIndex) {
978 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
979 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
980 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
981 (Recover << HWASanAccessInfo::RecoverShift) |
982 (IsWrite << HWASanAccessInfo::IsWriteShift) |
983 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
984}
985
986HWAddressSanitizer::ShadowTagCheckInfo
987HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
988 DomTreeUpdater &DTU, LoopInfo *LI) {
989 ShadowTagCheckInfo R;
990
991 IRBuilder<> IRB(InsertBefore);
992
993 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
994 R.PtrTag =
995 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
996 R.AddrLong = untagPointer(IRB, R.PtrLong);
997 Value *Shadow = memToShadow(R.AddrLong, IRB);
998 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
999 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
1000
1001 if (MatchAllTag.has_value()) {
1002 Value *TagNotIgnored = IRB.CreateICmpNE(
1003 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
1004 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
1005 }
1006
1007 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
1008 TagMismatch, InsertBefore, false,
1009 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
1010
1011 return R;
1012}
1013
1014void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
1015 unsigned AccessSizeIndex,
1016 Instruction *InsertBefore,
1017 DomTreeUpdater &DTU,
1018 LoopInfo *LI) {
1019 assert(!UsePageAliases);
1020 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1021
1022 if (InlineFastPath)
1023 InsertBefore =
1024 insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
1025
1026 IRBuilder<> IRB(InsertBefore);
1027 bool UseFixedShadowIntrinsic = false;
1028 // The memaccess fixed shadow intrinsic is only supported on AArch64,
1029 // which allows a 16-bit immediate to be left-shifted by 32.
1030 // Since kShadowBaseAlignment == 32, and Linux by default will not
1031 // mmap above 48-bits, practically any valid shadow offset is
1032 // representable.
1033 // In particular, an offset of 4TB (1024 << 32) is representable, and
1034 // ought to be good enough for anybody.
1035 if (TargetTriple.isAArch64() && Mapping.isFixed()) {
1036 uint16_t OffsetShifted = Mapping.offset() >> 32;
1037 UseFixedShadowIntrinsic =
1038 static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
1039 }
1040
1041 if (UseFixedShadowIntrinsic) {
1042 IRB.CreateIntrinsic(
1043 UseShortGranules
1044 ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
1045 : Intrinsic::hwasan_check_memaccess_fixedshadow,
1046 {},
1047 {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
1048 ConstantInt::get(Int64Ty, Mapping.offset())});
1049 } else {
1050 IRB.CreateIntrinsic(
1051 UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
1052 : Intrinsic::hwasan_check_memaccess,
1053 {}, {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
1054 }
1055}
1056
// Emit the fully inline tag check: fast-path tag compare, short-granule
// handling, and a trapping inline-asm sequence on failure whose immediate
// encodes AccessInfo for the signal handler.
// NOTE(review): several lines were dropped by the extraction (branch-weight
// arguments at the two inner SplitBlockAndInsertIfThen calls, and the
// `Asm = InlineAsm::get(` line in each switch arm) — verify against upstream.
1057void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
1058                                                   unsigned AccessSizeIndex,
1059                                                   Instruction *InsertBefore,
1060                                                   DomTreeUpdater &DTU,
1061                                                   LoopInfo *LI) {
1062  assert(!UsePageAliases);
1063  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1064
1065  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
1066
  // Shadow values 1..15 denote short granules; anything above 15 on a
  // mismatch is a definite failure.
1067  IRBuilder<> IRB(TCI.TagMismatchTerm);
1068  Value *OutOfShortGranuleTagRange =
1069      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
1070  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
1071      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
1072      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
1073
  // Short granule: the shadow byte holds the number of valid bytes in the
  // final granule; the access is out of bounds if it extends past that.
1074  IRB.SetInsertPoint(TCI.TagMismatchTerm);
1075  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
1076  PtrLowBits = IRB.CreateAdd(
1077      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
1078  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
1079  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
  // NOTE(review): line 1080 (branch weights / &DTU argument) missing here.
1081                            LI, CheckFailTerm->getParent());
1082
  // For a short granule the real tag is stored in the granule's last byte.
1083  IRB.SetInsertPoint(TCI.TagMismatchTerm);
1084  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
1085  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
1086  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
1087  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
1088  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
  // NOTE(review): line 1089 (branch weights / &DTU argument) missing here.
1090                            LI, CheckFailTerm->getParent());
1091
1092  IRB.SetInsertPoint(CheckFailTerm);
1093  InlineAsm *Asm;
1094  switch (TargetTriple.getArch()) {
1095  case Triple::x86_64:
1096    // The signal handler will find the data address in rdi.
  // NOTE(review): `Asm = InlineAsm::get(` (line 1097) missing here.
1098        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1099        "int3\nnopl " +
1100            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
1101            "(%rax)",
1102        "{rdi}",
1103        /*hasSideEffects=*/true);
1104    break;
1105  case Triple::aarch64:
1106  case Triple::aarch64_be:
1107    // The signal handler will find the data address in x0.
  // NOTE(review): `Asm = InlineAsm::get(` (line 1108) missing here.
1109        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1110        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1111        "{x0}",
1112        /*hasSideEffects=*/true);
1113    break;
1114  case Triple::riscv64:
1115    // The signal handler will find the data address in x10.
  // NOTE(review): `Asm = InlineAsm::get(` (line 1116) missing here.
1117        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1118        "ebreak\naddiw x0, x11, " +
1119            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1120        "{x10}",
1121        /*hasSideEffects=*/true);
1122    break;
1123  default:
1124    report_fatal_error("unsupported architecture");
1125  }
1126  IRB.CreateCall(Asm, TCI.PtrLong);
  // In recover mode, fall back to the continuation block instead of trapping
  // for good.
1127  if (Recover)
1128    cast<BranchInst>(CheckFailTerm)
1129        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
1130}
1131
1132bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1133 MemIntrinsic *MI) {
1134 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1135 return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
1136 (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
1137 }
1138 if (isa<MemSetInst>(MI))
1139 return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
1140 return false;
1141}
1142
// Replace a mem intrinsic with the matching tag-aware hwasan runtime call
// (__hwasan_memcpy/memmove/memset), optionally appending the match-all tag
// when the callbacks take it, then erase the original intrinsic.
// NOTE(review): the `SmallVector<Value *, N> Args{` declaration lines
// (1146 and 1154) were dropped by the extraction — verify against upstream.
1143void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1144  IRBuilder<> IRB(MI);
1145  if (isa<MemTransferInst>(MI)) {
1147        MI->getOperand(0), MI->getOperand(1),
1148        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1149
1150    if (UseMatchAllCallback)
1151      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1152    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1153  } else if (isa<MemSetInst>(MI)) {
1155        MI->getOperand(0),
1156        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1157        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1158    if (UseMatchAllCallback)
1159      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1160    IRB.CreateCall(HwasanMemset, Args);
1161  }
  // The runtime call fully replaces the intrinsic.
1162  MI->eraseFromParent();
1163}
1164
// Instrument one interesting memory operand: power-of-two sized, sufficiently
// aligned accesses get a sized check (call, outlined, or inline); everything
// else falls back to the sized "N-byte" runtime callback. Finally the pointer
// operand is untagged on targets that require it.
// NOTE(review): the `SmallVector<Value *, N> Args{...}` declaration lines
// (1182 and 1195) were dropped by the extraction — verify against upstream.
1165bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1166                                             DomTreeUpdater &DTU,
1167                                             LoopInfo *LI) {
1168  Value *Addr = O.getPtr();
1169
1170  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1171
  // Masked loads/stores are not yet supported.
1172  if (O.MaybeMask)
1173    return false; // FIXME
1174
1175  IRBuilder<> IRB(O.getInsn());
1176  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1177      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1178      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1179       *O.Alignment >= O.TypeStoreSize / 8)) {
1180    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1181    if (InstrumentWithCalls) {
1183      if (UseMatchAllCallback)
1184        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1185      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1186                     Args);
1187    } else if (OutlinedChecks) {
1188      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1189                                 DTU, LI);
1190    } else {
1191      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1192                                DTU, LI);
1193    }
1194  } else {
    // Unusual size or alignment: check via the sized runtime callback.
1196        IRB.CreatePointerCast(Addr, IntptrTy),
1197        IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1198                       ConstantInt::get(IntptrTy, 8))};
1199    if (UseMatchAllCallback)
1200      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1201    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1202  }
1203  untagPointerOperand(O.getInsn(), Addr);
1204
1205  return true;
1206}
1207
1208void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1209 size_t Size) {
1210 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1211 if (!UseShortGranules)
1212 Size = AlignedSize;
1213
1214 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1215 if (InstrumentWithCalls) {
1216 IRB.CreateCall(HwasanTagMemoryFunc,
1217 {IRB.CreatePointerCast(AI, PtrTy), Tag,
1218 ConstantInt::get(IntptrTy, AlignedSize)});
1219 } else {
1220 size_t ShadowSize = Size >> Mapping.scale();
1221 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1222 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1223 // If this memset is not inlined, it will be intercepted in the hwasan
1224 // runtime library. That's OK, because the interceptor skips the checks if
1225 // the address is in the shadow region.
1226 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1227 // llvm.memset right here into either a sequence of stores, or a call to
1228 // hwasan_tag_memory.
1229 if (ShadowSize)
1230 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
1231 if (Size != AlignedSize) {
1232 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1233 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1234 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1235 IRB.CreateStore(
1236 Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1237 AlignedSize - 1));
1238 }
1239 }
1240}
1241
1242unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1243 if (TargetTriple.getArch() == Triple::x86_64)
1244 return AllocaNo & TagMaskByte;
1245
1246 // A list of 8-bit numbers that have at most one run of non-zero bits.
1247 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1248 // masks.
1249 // The list does not include the value 255, which is used for UAR.
1250 //
1251 // Because we are more likely to use earlier elements of this list than later
1252 // ones, it is sorted in increasing order of probability of collision with a
1253 // mask allocated (temporally) nearby. The program that generated this list
1254 // can be found at:
1255 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1256 static const unsigned FastMasks[] = {
1257 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1258 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1259 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1260 return FastMasks[AllocaNo % std::size(FastMasks)];
1261}
1262
1263Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1264 if (TagMaskByte == 0xFF)
1265 return OldTag; // No need to clear the tag byte.
1266 return IRB.CreateAnd(OldTag,
1267 ConstantInt::get(OldTag->getType(), TagMaskByte));
1268}
1269
1270Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1271 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1272}
1273
// Compute (and cache) the per-frame base tag. Entropy is taken from the
// frame pointer; a null return means the caller should generate tags via
// runtime calls instead.
// NOTE(review): the guard condition before the first `return nullptr;`
// (presumably `if (ClGenerateTagsWithCalls)`, line 1275) was dropped by the
// extraction — verify against upstream LLVM.
1274Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1276    return nullptr;
1277  if (StackBaseTag)
1278    return StackBaseTag;
1279  // Extract some entropy from the stack pointer for the tags.
1280  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1281  // between functions).
1282  Value *FramePointerLong = getCachedFP(IRB);
1283  Value *StackTag =
1284      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
1285                                      IRB.CreateLShr(FramePointerLong, 20)));
1286  StackTag->setName("hwasan.stack.base.tag");
1287  return StackTag;
1288}
1289
// Derive the tag for one alloca by xoring the frame's base tag with a cheap
// per-alloca retag mask; falls back to a runtime-generated tag.
// NOTE(review): the guard before `return getNextTagWithCall` (presumably
// `if (ClGenerateTagsWithCalls)`, line 1292) was dropped by the extraction —
// verify against upstream LLVM.
1290Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1291                                        unsigned AllocaNo) {
1293    return getNextTagWithCall(IRB);
1294  return IRB.CreateXor(
1295      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1296}
1297
1298Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1299 Value *FramePointerLong = getCachedFP(IRB);
1300 Value *UARTag =
1301 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1302
1303 UARTag->setName("hwasan.uar.tag");
1304 return UARTag;
1305}
1306
1307// Add a tag to an address.
1308Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1309 Value *PtrLong, Value *Tag) {
1310 assert(!UsePageAliases);
1311 Value *TaggedPtrLong;
1312 if (CompileKernel) {
1313 // Kernel addresses have 0xFF in the most significant byte.
1314 Value *ShiftedTag =
1315 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1316 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1317 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1318 } else {
1319 // Userspace can simply do OR (tag << PointerTagShift);
1320 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1321 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1322 }
1323 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1324}
1325
1326// Remove tag from an address.
1327Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1328 assert(!UsePageAliases);
1329 Value *UntaggedPtrLong;
1330 if (CompileKernel) {
1331 // Kernel addresses have 0xFF in the most significant byte.
1332 UntaggedPtrLong =
1333 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1334 TagMaskByte << PointerTagShift));
1335 } else {
1336 // Userspace addresses have 0x00.
1337 UntaggedPtrLong = IRB.CreateAnd(
1338 PtrLong, ConstantInt::get(PtrLong->getType(),
1339 ~(TagMaskByte << PointerTagShift)));
1340 }
1341 return UntaggedPtrLong;
1342}
1343
1344Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1345 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1346 // in Bionic's libc/platform/bionic/tls_defines.h.
1347 constexpr int SanitizerSlot = 6;
1348 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1349 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1350 return ThreadPtrGlobal;
1351}
1352
1353Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1354 if (!CachedFP)
1355 CachedFP = memtag::getFP(IRB);
1356 return CachedFP;
1357}
1358
1359Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1360 // Prepare ring buffer data.
1361 Value *PC = memtag::getPC(TargetTriple, IRB);
1362 Value *FP = getCachedFP(IRB);
1363
1364 // Mix FP and PC.
1365 // Assumptions:
1366 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1367 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1368 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1369 // 0xFFFFPPPPPPPPPPPP
1370 //
1371 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1372 // prefer FP-relative offsets for functions compiled with HWASan.
1373 FP = IRB.CreateShl(FP, 44);
1374 return IRB.CreateOr(PC, FP);
1375}
1376
// Emit the function prologue: establish the shadow base (fixed, ifunc, or
// derived from the per-thread state) and, when requested, append a frame
// record to the per-thread stack-history ring buffer.
// NOTE(review): the `llvm_unreachable(` call line (1425) in the `none` case
// was dropped by the extraction — verify against upstream LLVM.
1377void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1378  if (!Mapping.isInTls())
1379    ShadowBase = getShadowNonTls(IRB);
1380  else if (!WithFrameRecord && TargetTriple.isAndroid())
1381    ShadowBase = getDynamicShadowIfunc(IRB);
1382
  // If the shadow base is known and no frame record is needed, we are done.
1383  if (!WithFrameRecord && ShadowBase)
1384    return;
1385
1386  Value *SlotPtr = nullptr;
1387  Value *ThreadLong = nullptr;
1388  Value *ThreadLongMaybeUntagged = nullptr;
1389
  // Lazily load the per-thread state word; both the frame record and the
  // shadow-base computation below may need it.
1390  auto getThreadLongMaybeUntagged = [&]() {
1391    if (!SlotPtr)
1392      SlotPtr = getHwasanThreadSlotPtr(IRB);
1393    if (!ThreadLong)
1394      ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1395    // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1396    // TBI.
1397    return TargetTriple.isAArch64() ? ThreadLong
1398                                    : untagPointer(IRB, ThreadLong);
1399  };
1400
1401  if (WithFrameRecord) {
1402    switch (ClRecordStackHistory) {
1403    case libcall: {
1404      // Emit a runtime call into hwasan rather than emitting instructions for
1405      // recording stack history.
1406      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1407      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1408      break;
1409    }
1410    case instr: {
1411      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1412
1413      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1414
1415      // Store data to ring buffer.
1416      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1417      Value *RecordPtr =
1418          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1419      IRB.CreateStore(FrameRecordInfo, RecordPtr);
1420
      // Advance the ring-buffer cursor stored in the thread slot.
1421      IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
1422      break;
1423    }
1424    case none: {
      // NOTE(review): `llvm_unreachable(` (line 1425) missing here.
1426          "A stack history recording mode should've been selected.");
1427    }
1428    }
1429  }
1430
1431  if (!ShadowBase) {
1432    if (!ThreadLongMaybeUntagged)
1433      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1434
1435    // Get shadow base address by aligning RecordPtr up.
1436    // Note: this is not correct if the pointer is already aligned.
1437    // Runtime library will make sure this never happens.
1438    ShadowBase = IRB.CreateAdd(
1439        IRB.CreateOr(
1440            ThreadLongMaybeUntagged,
1441            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1442        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1443    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1444  }
1445}
1446
// After each landing pad, call __hwasan_handle_vfork with the current stack
// pointer so the runtime can untag frames that were unwound past.
// NOTE(review): the argument line opening the call's operand list (1453,
// presumably `{memtag::readRegister(`) was dropped by the extraction —
// verify against upstream LLVM.
1447bool HWAddressSanitizer::instrumentLandingPads(
1448    SmallVectorImpl<Instruction *> &LandingPadVec) {
1449  for (auto *LP : LandingPadVec) {
1450    IRBuilder<> IRB(LP->getNextNonDebugInstruction());
1451    IRB.CreateCall(
1452        HwasanHandleVfork,
1454            IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
1455  }
1456  return true;
1457}
1458
// Tag every interesting alloca, rewrite its uses to the tagged address, and
// retag back to the UAR tag at lifetime end / every function exit.
// NOTE(review): the extraction dropped the per-alloca IRBuilder creation
// (line 1475, presumably `IRBuilder<> IRB(AI->getNextNode());`) and part of
// the replaceUsesWithIf predicate (line 1511, presumably
// `memtag::isLifetimeIntrinsic(User);`) — verify against upstream LLVM.
1459bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1460                                         Value *StackTag, Value *UARTag,
1461                                         const DominatorTree &DT,
1462                                         const PostDominatorTree &PDT,
1463                                         const LoopInfo &LI) {
1464  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1465  // alloca addresses using that. Unfortunately, offsets are not known yet
1466  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1467  // temp, shift-OR it into each alloca address and xor with the retag mask.
1468  // This generates one extra instruction per alloca use.
1469  unsigned int I = 0;
1470
1471  for (auto &KV : SInfo.AllocasToInstrument) {
1472    auto N = I++;
1473    auto *AI = KV.first;
1474    memtag::AllocaInfo &Info = KV.second;
    // NOTE(review): line 1475 missing here (IRB construction for this alloca).
1476
1477    // Replace uses of the alloca with tagged address.
1478    Value *Tag = getAllocaTag(IRB, StackTag, N);
1479    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1480    Value *AINoTagLong = untagPointer(IRB, AILong);
1481    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1482    std::string Name =
1483        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1484    Replacement->setName(Name + ".hwasan");
1485
1486    size_t Size = memtag::getAllocaSizeInBytes(*AI);
1487    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1488
1489    Value *AICast = IRB.CreatePointerCast(AI, PtrTy);
1490
1491    auto HandleLifetime = [&](IntrinsicInst *II) {
1492      // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1493      // set of assumptions we need to make about the lifetime. Without this we
1494      // would need to ensure that we can track the lifetime pointer to a
1495      // constant offset from the alloca, and would still need to change the
1496      // size to include the extra alignment we use for the untagging to make
1497      // the size consistent.
1498      //
1499      // The check for standard lifetime below makes sure that we have exactly
1500      // one set of start / end in any execution (i.e. the ends are not
1501      // reachable from each other), so this will not cause any problems.
1502      II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1503      II->setArgOperand(1, AICast);
1504    };
1505    llvm::for_each(Info.LifetimeStart, HandleLifetime);
1506    llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1507
    // Keep the helper casts (and, per upstream, lifetime intrinsics) on the
    // untagged pointer; everything else sees the tagged replacement.
1508    AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
1509      auto *User = U.getUser();
1510      return User != AILong && User != AICast &&
      // NOTE(review): line 1511 missing here (rest of the predicate).
1512    });
1513
1514    memtag::annotateDebugRecords(Info, retagMask(N));
1515
1516    auto TagEnd = [&](Instruction *Node) {
1517      IRB.SetInsertPoint(Node);
1518      // When untagging, use the `AlignedSize` because we need to set the tags
1519      // for the entire alloca to original. If we used `Size` here, we would
1520      // keep the last granule tagged, and store zero in the last byte of the
1521      // last granule, due to how short granules are implemented.
1522      tagAlloca(IRB, AI, UARTag, AlignedSize);
1523    };
1524    // Calls to functions that may return twice (e.g. setjmp) confuse the
1525    // postdominator analysis, and will leave us to keep memory tagged after
1526    // function return. Work around this by always untagging at every return
1527    // statement if return_twice functions are called.
1528    bool StandardLifetime =
1529        !SInfo.CallsReturnTwice &&
1530        SInfo.UnrecognizedLifetimes.empty() &&
1531        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1532                                   &LI, ClMaxLifetimes);
1533    if (DetectUseAfterScope && StandardLifetime) {
1534      IntrinsicInst *Start = Info.LifetimeStart[0];
1535      IRB.SetInsertPoint(Start->getNextNode());
1536      tagAlloca(IRB, AI, Tag, Size);
1537      if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1538                                        SInfo.RetVec, TagEnd)) {
1539        for (auto *End : Info.LifetimeEnd)
1540          End->eraseFromParent();
1541      }
1542    } else {
1543      tagAlloca(IRB, AI, Tag, Size);
1544      for (auto *RI : SInfo.RetVec)
1545        TagEnd(RI);
1546      // We inserted tagging outside of the lifetimes, so we have to remove
1547      // them.
1548      for (auto &II : Info.LifetimeStart)
1549        II->eraseFromParent();
1550      for (auto &II : Info.LifetimeEnd)
1551        II->eraseFromParent();
1552    }
1553    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1554  }
1555  for (auto &I : SInfo.UnrecognizedLifetimes)
1556    I->eraseFromParent();
1557  return true;
1558}
1559
// Emit an optimization remark recording whether function F was skipped or
// sanitized, for `-Rpass` style diagnostics.
// NOTE(review): the signature line (1560) was dropped by the extraction —
// presumably a static helper taking `Function &F`,
// `OptimizationRemarkEmitter &ORE` and `bool Skip`; verify against upstream.
1561                                         bool Skip) {
1562  if (Skip) {
1563    ORE.emit([&]() {
1564      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
1565             << "Skipped: F=" << ore::NV("Function", &F);
1566    });
1567  } else {
1568    ORE.emit([&]() {
1569      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
1570             << "Sanitized: F=" << ore::NV("Function", &F);
1571    });
1572  }
1573}
1574
// Decide whether to skip instrumenting F entirely, either because it is hot
// per the profile summary (-hwasan-percentile-cutoff-hot) or by random
// sampling (-hwasan-random-rate); the decision is reported via a remark.
// NOTE(review): the extraction dropped the remaining parameter line (1576),
// the MAMProxy acquisition line (1580), the percentile-cutoff argument line
// (1588) and the emitRemark call line (1599) — verify against upstream LLVM.
1575bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
1577  auto SkipHot = [&]() {
1578    if (!ClHotPercentileCutoff.getNumOccurrences())
1579      return false;
1581    ProfileSummaryInfo *PSI =
1582        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
1583    if (!PSI || !PSI->hasProfileSummary()) {
1584      ++NumNoProfileSummaryFuncs;
1585      return false;
1586    }
1587    return PSI->isFunctionHotInCallGraphNthPercentile(
1589  };
1590
1591  auto SkipRandom = [&]() {
1592    if (!ClRandomSkipRate.getNumOccurrences())
1593      return false;
    // Keep with probability ClRandomSkipRate, i.e. skip when the draw fails.
1594    std::bernoulli_distribution D(ClRandomSkipRate);
1595    return !D(*Rng);
1596  };
1597
1598  bool Skip = SkipRandom() || SkipHot();
1600  return Skip;
1601}
1602
// Per-function driver: collect interesting accesses, stack allocas, landing
// pads and mem intrinsics, emit the prologue, then instrument each category.
// Per-function cached state (ShadowBase, StackBaseTag, CachedFP) is reset at
// the end.
// NOTE(review): the extraction dropped several lines (the
// `FunctionAnalysisManager &FAM` parameter at 1604, the TLI/ORE analysis
// fetches around 1620-1621, the StackInfoBuilder construction around
// 1633-1635, the DT/PDT fetches at 1679-1680 and the LoopInfo fetch around
// 1700-1702) — verify against upstream LLVM.
1603void HWAddressSanitizer::sanitizeFunction(Function &F,
1605  if (&F == HwasanCtorFunction)
1606    return;
1607
1608  // Do not apply any instrumentation for naked functions.
1609  if (F.hasFnAttribute(Attribute::Naked))
1610    return;
1611
1612  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1613    return;
1614
1615  if (F.empty())
1616    return;
1617
1618  NumTotalFuncs++;
1619
1622
1623  if (selectiveInstrumentationShouldSkip(F, FAM))
1624    return;
1625
1626  NumInstrumentedFuncs++;
1627
1628  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1629
1630  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1631  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1632  SmallVector<Instruction *, 8> LandingPadVec;
1634
  // Single pass over all instructions to gather everything to instrument.
1636  for (auto &Inst : instructions(F)) {
1637    if (InstrumentStack) {
1638      SIB.visit(ORE, Inst);
1639    }
1640
1641    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1642      LandingPadVec.push_back(&Inst);
1643
1644    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
1645
1646    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1647      if (!ignoreMemIntrinsic(ORE, MI))
1648        IntrinToInstrument.push_back(MI);
1649  }
1650
1651  memtag::StackInfo &SInfo = SIB.get();
1652
1653  initializeCallbacks(*F.getParent());
1654
1655  if (!LandingPadVec.empty())
1656    instrumentLandingPads(LandingPadVec);
1657
1658  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1659      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1660    // __hwasan_personality_thunk is a no-op for functions without an
1661    // instrumented stack, so we can drop it.
1662    F.setPersonalityFn(nullptr);
1663  }
1664
  // Nothing at all to do for this function.
1665  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1666      IntrinToInstrument.empty())
1667    return;
1668
1669  assert(!ShadowBase);
1670
  // The prologue must be first in the entry block so the shadow base
  // dominates every check.
1671  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1672  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1673  emitPrologue(EntryIRB,
1674               /*WithFrameRecord*/ ClRecordStackHistory != none &&
1675                   Mapping.withFrameRecord() &&
1676                   !SInfo.AllocasToInstrument.empty());
1677
1678  if (!SInfo.AllocasToInstrument.empty()) {
1681    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1682    Value *StackTag = getStackBaseTag(EntryIRB);
1683    Value *UARTag = getUARTag(EntryIRB);
1684    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1685  }
1686
1687  // If we split the entry block, move any allocas that were originally in the
1688  // entry block back into the entry block so that they aren't treated as
1689  // dynamic allocas.
1690  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1691    InsertPt = F.getEntryBlock().begin();
1692    for (Instruction &I :
1693         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1694      if (auto *AI = dyn_cast<AllocaInst>(&I))
1695        if (isa<ConstantInt>(AI->getArraySize()))
1696          I.moveBefore(F.getEntryBlock(), InsertPt);
1697    }
1698  }
1699
  // Batch the CFG updates produced by the access checks.
1703  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1704  for (auto &Operand : OperandsToInstrument)
1705    instrumentMemAccess(Operand, DTU, LI);
1706  DTU.flush();
1707
1708  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1709    for (auto *Inst : IntrinToInstrument)
1710      instrumentMemIntrinsic(Inst);
1711  }
1712
  // Reset per-function cached values.
1713  ShadowBase = nullptr;
1714  StackBaseTag = nullptr;
1715  CachedFP = nullptr;
1716}
1717
// Tag one global: clone it with granule padding and short-granule byte,
// emit hwasan_globals descriptors for the runtime, and replace the original
// with an alias whose value carries the tag in the top byte.
// NOTE(review): the extraction dropped the Padding constant creation
// (line 1729), parts of the GVRelPtr constant expression (1766-1767), the
// descriptor's MD_associated operand line (1778) and the head of the Aliasee
// constant expression (1782-1783) — verify against upstream LLVM.
1718void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1719  assert(!UsePageAliases);
1720  Constant *Initializer = GV->getInitializer();
1721  uint64_t SizeInBytes =
1722      M.getDataLayout().getTypeAllocSize(Initializer->getType());
1723  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1724  if (SizeInBytes != NewSize) {
1725    // Pad the initializer out to the next multiple of 16 bytes and add the
1726    // required short granule tag.
1727    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1728    Init.back() = Tag;
    // NOTE(review): line 1729 missing here (creation of `Padding`).
1730    Initializer = ConstantStruct::getAnon({Initializer, Padding});
1731  }
1732
1733  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1734                                   GlobalValue::ExternalLinkage, Initializer,
1735                                   GV->getName() + ".hwasan");
1736  NewGV->copyAttributesFrom(GV);
1737  NewGV->setLinkage(GlobalValue::PrivateLinkage);
1738  NewGV->copyMetadata(GV, 0);
1739  NewGV->setAlignment(
1740      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1741
1742  // It is invalid to ICF two globals that have different tags. In the case
1743  // where the size of the global is a multiple of the tag granularity the
1744  // contents of the globals may be the same but the tags (i.e. symbol values)
1745  // may be different, and the symbols are not considered during ICF. In the
1746  // case where the size is not a multiple of the granularity, the short granule
1747  // tags would discriminate two globals with different tags, but there would
1748  // otherwise be nothing stopping such a global from being incorrectly ICF'd
1749  // with an uninstrumented (i.e. tag 0) global that happened to have the short
1750  // granule tag in the last byte.
1751  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1752
1753  // Descriptor format (assuming little-endian):
1754  // bytes 0-3: relative address of global
1755  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1756  // it isn't, we create multiple descriptors)
1757  // byte 7: tag
1758  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1759  const uint64_t MaxDescriptorSize = 0xfffff0;
1760  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1761       DescriptorPos += MaxDescriptorSize) {
1762    auto *Descriptor =
1763        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1764                           nullptr, GV->getName() + ".hwasan.descriptor");
1765    auto *GVRelPtr = ConstantExpr::getTrunc(
    // NOTE(review): lines 1766-1767 missing here (getAdd/getSub wrappers).
1768            ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1769            ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1770        ConstantInt::get(Int64Ty, DescriptorPos)),
1771        Int32Ty);
1772    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1773    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1774    Descriptor->setComdat(NewGV->getComdat());
1775    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1776    Descriptor->setSection("hwasan_globals");
1777    Descriptor->setMetadata(LLVMContext::MD_associated,
    // NOTE(review): line 1778 missing here (the MDNode operand).
1779    appendToCompilerUsed(M, Descriptor);
1780  }
1781
  // NOTE(review): lines 1782-1783 missing here (start of the Aliasee
  // constant: IntToPtr of the tagged address).
1784          ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1785          ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1786      GV->getType());
1787  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1788                                    GV->getLinkage(), "", Aliasee, &M);
1789  Alias->setVisibility(GV->getVisibility());
1790  Alias->takeName(GV);
1791  GV->replaceAllUsesWith(Alias);
1792  GV->eraseFromParent();
1793}
1794
1795void HWAddressSanitizer::instrumentGlobals() {
1796 std::vector<GlobalVariable *> Globals;
1797 for (GlobalVariable &GV : M.globals()) {
1799 continue;
1800
1801 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1802 GV.isThreadLocal())
1803 continue;
1804
1805 // Common symbols can't have aliases point to them, so they can't be tagged.
1806 if (GV.hasCommonLinkage())
1807 continue;
1808
1809 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1810 // which would be broken both by adding tags and potentially by the extra
1811 // padding/alignment that we insert.
1812 if (GV.hasSection())
1813 continue;
1814
1815 Globals.push_back(&GV);
1816 }
1817
1818 MD5 Hasher;
1819 Hasher.update(M.getSourceFileName());
1820 MD5::MD5Result Hash;
1821 Hasher.final(Hash);
1822 uint8_t Tag = Hash[0];
1823
1824 assert(TagMaskByte >= 16);
1825
1826 for (GlobalVariable *GV : Globals) {
1827 // Don't allow globals to be tagged with something that looks like a
1828 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1829 // the fast path shadow-vs-address check succeeds.
1830 if (Tag < 16 || Tag > TagMaskByte)
1831 Tag = 16;
1832 instrumentGlobal(GV, Tag++);
1833 }
1834}
1835
1836void HWAddressSanitizer::instrumentPersonalityFunctions() {
1837 // We need to untag stack frames as we unwind past them. That is the job of
1838 // the personality function wrapper, which either wraps an existing
1839 // personality function or acts as a personality function on its own. Each
1840 // function that has a personality function or that can be unwound past has
1841 // its personality function changed to a thunk that calls the personality
1842 // function wrapper in the runtime.
1844 for (Function &F : M) {
1845 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1846 continue;
1847
1848 if (F.hasPersonalityFn()) {
1849 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1850 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1851 PersonalityFns[nullptr].push_back(&F);
1852 }
1853 }
1854
1855 if (PersonalityFns.empty())
1856 return;
1857
1858 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1859 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1860 PtrTy, PtrTy, PtrTy, PtrTy);
1861 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1862 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1863
1864 for (auto &P : PersonalityFns) {
1865 std::string ThunkName = kHwasanPersonalityThunkName;
1866 if (P.first)
1867 ThunkName += ("." + P.first->getName()).str();
1868 FunctionType *ThunkFnTy = FunctionType::get(
1869 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1870 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1871 cast<GlobalValue>(P.first)->hasLocalLinkage());
1872 auto *ThunkFn = Function::Create(ThunkFnTy,
1875 ThunkName, &M);
1876 if (!IsLocal) {
1877 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1878 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1879 }
1880
1881 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1882 IRBuilder<> IRB(BB);
1883 CallInst *WrapperCall = IRB.CreateCall(
1884 HwasanPersonalityWrapper,
1885 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1886 ThunkFn->getArg(3), ThunkFn->getArg(4),
1887 P.first ? P.first : Constant::getNullValue(PtrTy),
1888 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1889 WrapperCall->setTailCall();
1890 IRB.CreateRet(WrapperCall);
1891
1892 for (Function *F : P.second)
1893 F->setPersonalityFn(ThunkFn);
1894 }
1895}
1896
1897void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1898 bool InstrumentWithCalls) {
1899 // Start with defaults.
1900 Scale = kDefaultShadowScale;
1901 Kind = OffsetKind::kTls;
1902 WithFrameRecord = true;
1903
1904 // Tune for the target.
1905 if (TargetTriple.isOSFuchsia()) {
1906 // Fuchsia is always PIE, which means that the beginning of the address
1907 // space is always available.
1908 SetFixed(0);
1909 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1910 SetFixed(0);
1911 WithFrameRecord = false;
1912 }
1913
1914 WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);
1915
1916 // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
1917 Kind = optOr(ClMappingOffsetDynamic, Kind);
1918 if (ClMappingOffset.getNumOccurrences() > 0 &&
1919 !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
1920 ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
1921 SetFixed(ClMappingOffset);
1922 }
1923}
static cl::opt< size_t > ClMaxLifetimes("stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define clEnumVal(ENUMVAL, DESC)
Definition: CommandLine.h:684
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1313
bool End
Definition: ELF_riscv.cpp:480
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static cl::opt< float > ClRandomSkipRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function."))
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static const size_t kDefaultShadowScale
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden)
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< bool > ClFrameRecords("hwasan-with-frame-record", cl::desc("Use ring buffer for stack allocations"), cl::Hidden)
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cuttoff."))
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< OffsetKind > ClMappingOffsetDynamic("hwasan-mapping-offset-dynamic", cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden, cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"), clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"), clEnumValN(OffsetKind::kTls, "tls", "Use TLS")))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This file contains some functions that are useful when dealing with strings.
an instruction to allocate memory on the stack
Definition: Instructions.h:63
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:99
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition: Constants.h:709
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2307
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2644
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2293
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2637
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2279
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition: Constants.h:480
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:173
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:557
MaybeAlign getAlign() const
Returns the alignment of the given variable or function.
Definition: GlobalObject.h:79
void setComdat(Comdat *C)
Definition: Globals.cpp:212
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:109
const SanitizerMetadata & getSanitizerMetadata() const
Definition: Globals.cpp:243
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:263
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:248
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
bool hasSanitizerMetadata() const
Definition: GlobalValue.h:355
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:52
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:55
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:488
Analysis pass providing a never-invalidated alias analysis result.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1902
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2201
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:890
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:592
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2150
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:103
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1460
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2002
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1119
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1401
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2277
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2281
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1813
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1439
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2048
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1498
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1826
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1350
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2444
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2034
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1520
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:566
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2285
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2227
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1479
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1542
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:567
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:176
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
Definition: MD5.h:41
void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition: MD5.cpp:189
void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition: MD5.cpp:234
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:47
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1543
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool empty() const
Definition: MapVector.h:79
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition: Module.cpp:258
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:692
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool empty() const
Definition: SmallVector.h:81
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:229
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:265
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:406
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:782
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:780
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:383
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition: Triple.h:1006
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:928
bool isOSFuchsia() const
Definition: Triple.h:598
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:730
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:501
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
int getNumOccurrences() const
Definition: CommandLine.h:399
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition: ELF.h:1736
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
Value * getFP(IRBuilder<> &IRB)
bool isStandardLifetime(const SmallVectorImpl< IntrinsicInst * > &LifetimeStart, const SmallVectorImpl< IntrinsicInst * > &LifetimeEnd, const DominatorTree *DT, const LoopInfo *LI, size_t MaxLifetimes)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const Instruction *Start, const SmallVectorImpl< IntrinsicInst * > &Ends, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
Value * incrementThreadLong(IRBuilder<> &IRB, Value *ThreadLong, unsigned int Inc)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
bool isLifetimeIntrinsic(Value *V)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1732
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:296
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:4197
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 4 > UnrecognizedLifetimes
SmallVector< Instruction *, 8 > RetVec