LLVM 20.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/Dominators.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/Instruction.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/LLVMContext.h"
49#include "llvm/IR/MDBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
55#include "llvm/Support/Debug.h"
56#include "llvm/Support/MD5.h"
67#include <optional>
68#include <random>
69
70using namespace llvm;
71
72#define DEBUG_TYPE "hwasan"
73
// Names of the module constructor / runtime entry points and of the global
// that the runtime uses to publish the dynamic shadow base.
const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// One shadow byte covers 2^kDefaultShadowScale (16) bytes of application
// memory (one "granule").
static const size_t kDefaultShadowScale = 4;
// Sentinel value of Mapping.Offset meaning "shadow base is discovered at
// runtime" rather than being a link-time constant.
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();

// The per-thread shadow base stored in TLS is aligned to 2^32.
static const unsigned kShadowBaseAlignment = 32;
90
92 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
93 cl::desc("Prefix for memory access callbacks"),
94 cl::Hidden, cl::init("__hwasan_"));
95
97 "hwasan-kernel-mem-intrinsic-prefix",
98 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
99 cl::init(false));
100
102 "hwasan-instrument-with-calls",
103 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
104 cl::init(false));
105
106static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
107 cl::desc("instrument read instructions"),
108 cl::Hidden, cl::init(true));
109
110static cl::opt<bool>
111 ClInstrumentWrites("hwasan-instrument-writes",
112 cl::desc("instrument write instructions"), cl::Hidden,
113 cl::init(true));
114
116 "hwasan-instrument-atomics",
117 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
118 cl::init(true));
119
120static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
121 cl::desc("instrument byval arguments"),
122 cl::Hidden, cl::init(true));
123
124static cl::opt<bool>
125 ClRecover("hwasan-recover",
126 cl::desc("Enable recovery mode (continue-after-error)."),
127 cl::Hidden, cl::init(false));
128
129static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
130 cl::desc("instrument stack (allocas)"),
131 cl::Hidden, cl::init(true));
132
133static cl::opt<bool>
134 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
135 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
137
139 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
141 cl::desc("How many lifetime ends to handle for a single alloca."),
143
144static cl::opt<bool>
145 ClUseAfterScope("hwasan-use-after-scope",
146 cl::desc("detect use after scope within function"),
147 cl::Hidden, cl::init(true));
148
150 "hwasan-generate-tags-with-calls",
151 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
152 cl::init(false));
153
154static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
155 cl::Hidden, cl::init(false));
156
158 "hwasan-match-all-tag",
159 cl::desc("don't report bad accesses via pointers with this tag"),
160 cl::Hidden, cl::init(-1));
161
162static cl::opt<bool>
163 ClEnableKhwasan("hwasan-kernel",
164 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
165 cl::Hidden, cl::init(false));
166
167// These flags allow to change the shadow mapping and control how shadow memory
168// is accessed. The shadow mapping looks like:
169// Shadow = (Mem >> scale) + offset
170
172 ClMappingOffset("hwasan-mapping-offset",
173 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
174 cl::Hidden, cl::init(0));
175
176static cl::opt<bool>
177 ClWithIfunc("hwasan-with-ifunc",
178 cl::desc("Access dynamic shadow through an ifunc global on "
179 "platforms that support this"),
180 cl::Hidden, cl::init(false));
181
183 "hwasan-with-tls",
184 cl::desc("Access dynamic shadow through an thread-local pointer on "
185 "platforms that support this"),
186 cl::Hidden, cl::init(true));
187
188static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
189 cl::desc("Hot percentile cuttoff."));
190
191static cl::opt<float>
192 ClRandomSkipRate("hwasan-random-rate",
193 cl::desc("Probability value in the range [0.0, 1.0] "
194 "to keep instrumentation of a function."));
195
196STATISTIC(NumTotalFuncs, "Number of total funcs");
197STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
198STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
199
200// Mode for selecting how to insert frame record info into the stack ring
201// buffer.
203 // Do not record frame record info.
205
206 // Insert instructions into the prologue for storing into the stack ring
207 // buffer directly.
209
210 // Add a call to __hwasan_add_frame_record in the runtime.
212};
213
215 "hwasan-record-stack-history",
216 cl::desc("Record stack frames with tagged allocations in a thread-local "
217 "ring buffer"),
218 cl::values(clEnumVal(none, "Do not record stack ring history"),
219 clEnumVal(instr, "Insert instructions into the prologue for "
220 "storing into the stack ring buffer directly"),
221 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
222 "storing into the stack ring buffer")),
224
225static cl::opt<bool>
226 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
227 cl::desc("instrument memory intrinsics"),
228 cl::Hidden, cl::init(true));
229
230static cl::opt<bool>
231 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
232 cl::desc("instrument landing pads"), cl::Hidden,
233 cl::init(false));
234
236 "hwasan-use-short-granules",
237 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
238 cl::init(false));
239
241 "hwasan-instrument-personality-functions",
242 cl::desc("instrument personality functions"), cl::Hidden);
243
244static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
245 cl::desc("inline all checks"),
246 cl::Hidden, cl::init(false));
247
248static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
249 cl::desc("inline all checks"),
250 cl::Hidden, cl::init(false));
251
252// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
253static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
254 cl::desc("Use page aliasing in HWASan"),
255 cl::Hidden, cl::init(false));
256
257namespace {
258
259template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
260 return Opt.getNumOccurrences() ? Opt : Other;
261}
262
263bool shouldUsePageAliases(const Triple &TargetTriple) {
264 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
265}
266
267bool shouldInstrumentStack(const Triple &TargetTriple) {
268 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
269}
270
271bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
272 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
273}
274
275bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
276 return optOr(ClUseStackSafety, !DisableOptimization);
277}
278
279bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
280 bool DisableOptimization) {
281 return shouldInstrumentStack(TargetTriple) &&
282 mightUseStackSafetyAnalysis(DisableOptimization);
283}
284
285bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
286 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
287}
288
289/// An instrumentation pass implementing detection of addressability bugs
290/// using tagged pointers.
291class HWAddressSanitizer {
292public:
293 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
294 const StackSafetyGlobalInfo *SSI)
295 : M(M), SSI(SSI) {
296 this->Recover = optOr(ClRecover, Recover);
297 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
298 this->Rng = ClRandomSkipRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
299 : nullptr;
300
301 initializeModule();
302 }
303
304 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
305
306private:
307 struct ShadowTagCheckInfo {
308 Instruction *TagMismatchTerm = nullptr;
309 Value *PtrLong = nullptr;
310 Value *AddrLong = nullptr;
311 Value *PtrTag = nullptr;
312 Value *MemTag = nullptr;
313 };
314
315 bool selectiveInstrumentationShouldSkip(Function &F,
317 void initializeModule();
318 void createHwasanCtorComdat();
319
320 void initializeCallbacks(Module &M);
321
322 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
323
324 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
325 Value *getShadowNonTls(IRBuilder<> &IRB);
326
327 void untagPointerOperand(Instruction *I, Value *Addr);
328 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
329
330 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
331 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
332 DomTreeUpdater &DTU, LoopInfo *LI);
333 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
334 unsigned AccessSizeIndex,
335 Instruction *InsertBefore,
336 DomTreeUpdater &DTU, LoopInfo *LI);
337 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
338 unsigned AccessSizeIndex,
339 Instruction *InsertBefore, DomTreeUpdater &DTU,
340 LoopInfo *LI);
341 bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
342 void instrumentMemIntrinsic(MemIntrinsic *MI);
343 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
344 LoopInfo *LI);
345 bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
346 bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
347 Value *Ptr);
348
351 const TargetLibraryInfo &TLI,
353
354 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
355 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
356 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
357 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
358 const DominatorTree &DT, const PostDominatorTree &PDT,
359 const LoopInfo &LI);
360 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
361 Value *getNextTagWithCall(IRBuilder<> &IRB);
362 Value *getStackBaseTag(IRBuilder<> &IRB);
363 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
364 Value *getUARTag(IRBuilder<> &IRB);
365
366 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
367 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
368 unsigned retagMask(unsigned AllocaNo);
369
370 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
371
372 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
373 void instrumentGlobals();
374
375 Value *getCachedFP(IRBuilder<> &IRB);
376 Value *getFrameRecordInfo(IRBuilder<> &IRB);
377
378 void instrumentPersonalityFunctions();
379
380 LLVMContext *C;
381 Module &M;
382 const StackSafetyGlobalInfo *SSI;
383 Triple TargetTriple;
384 std::unique_ptr<RandomNumberGenerator> Rng;
385
386 /// This struct defines the shadow mapping using the rule:
387 /// shadow = (mem >> Scale) + Offset.
388 /// If InGlobal is true, then
389 /// extern char __hwasan_shadow[];
390 /// shadow = (mem >> Scale) + &__hwasan_shadow
391 /// If InTls is true, then
392 /// extern char *__hwasan_tls;
393 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
394 ///
395 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
396 /// ring buffer for storing stack allocations on targets that support it.
397 struct ShadowMapping {
398 uint8_t Scale;
400 bool InGlobal;
401 bool InTls;
402 bool WithFrameRecord;
403
404 void init(Triple &TargetTriple, bool InstrumentWithCalls);
405 Align getObjectAlignment() const { return Align(1ULL << Scale); }
406 };
407
408 ShadowMapping Mapping;
409
410 Type *VoidTy = Type::getVoidTy(M.getContext());
411 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
412 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
413 Type *Int8Ty = Type::getInt8Ty(M.getContext());
414 Type *Int32Ty = Type::getInt32Ty(M.getContext());
415 Type *Int64Ty = Type::getInt64Ty(M.getContext());
416
417 bool CompileKernel;
418 bool Recover;
419 bool OutlinedChecks;
420 bool InlineFastPath;
421 bool UseShortGranules;
422 bool InstrumentLandingPads;
423 bool InstrumentWithCalls;
424 bool InstrumentStack;
425 bool InstrumentGlobals;
426 bool DetectUseAfterScope;
427 bool UsePageAliases;
428 bool UseMatchAllCallback;
429
430 std::optional<uint8_t> MatchAllTag;
431
432 unsigned PointerTagShift;
433 uint64_t TagMaskByte;
434
435 Function *HwasanCtorFunction;
436
437 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
438 FunctionCallee HwasanMemoryAccessCallbackSized[2];
439
440 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
441 FunctionCallee HwasanHandleVfork;
442
443 FunctionCallee HwasanTagMemoryFunc;
444 FunctionCallee HwasanGenerateTagFunc;
445 FunctionCallee HwasanRecordFrameRecordFunc;
446
447 Constant *ShadowGlobal;
448
449 Value *ShadowBase = nullptr;
450 Value *StackBaseTag = nullptr;
451 Value *CachedFP = nullptr;
452 GlobalValue *ThreadPtrGlobal = nullptr;
453};
454
455} // end anonymous namespace
456
459 // Return early if nosanitize_hwaddress module flag is present for the module.
460 if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
461 return PreservedAnalyses::all();
462 const StackSafetyGlobalInfo *SSI = nullptr;
463 auto TargetTriple = llvm::Triple(M.getTargetTriple());
464 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
466
467 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
468 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
469 for (Function &F : M)
470 HWASan.sanitizeFunction(F, FAM);
471
473 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
474 // are incrementally updated throughout this pass whenever
475 // SplitBlockAndInsertIfThen is called.
479 // GlobalsAA is considered stateless and does not get invalidated unless
480 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
481 // make changes that require GlobalsAA to be invalidated.
482 PA.abandon<GlobalsAA>();
483 return PA;
484}
486 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
488 OS, MapClassName2PassName);
489 OS << '<';
490 if (Options.CompileKernel)
491 OS << "kernel;";
492 if (Options.Recover)
493 OS << "recover";
494 OS << '>';
495}
496
497void HWAddressSanitizer::createHwasanCtorComdat() {
498 std::tie(HwasanCtorFunction, std::ignore) =
501 /*InitArgTypes=*/{},
502 /*InitArgs=*/{},
503 // This callback is invoked when the functions are created the first
504 // time. Hook them into the global ctors list in that case:
505 [&](Function *Ctor, FunctionCallee) {
506 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
507 Ctor->setComdat(CtorComdat);
508 appendToGlobalCtors(M, Ctor, 0, Ctor);
509 });
510
511 // Create a note that contains pointers to the list of global
512 // descriptors. Adding a note to the output file will cause the linker to
513 // create a PT_NOTE program header pointing to the note that we can use to
514 // find the descriptor list starting from the program headers. A function
515 // provided by the runtime initializes the shadow memory for the globals by
516 // accessing the descriptor list via the note. The dynamic loader needs to
517 // call this function whenever a library is loaded.
518 //
519 // The reason why we use a note for this instead of a more conventional
520 // approach of having a global constructor pass a descriptor list pointer to
521 // the runtime is because of an order of initialization problem. With
522 // constructors we can encounter the following problematic scenario:
523 //
524 // 1) library A depends on library B and also interposes one of B's symbols
525 // 2) B's constructors are called before A's (as required for correctness)
526 // 3) during construction, B accesses one of its "own" globals (actually
527 // interposed by A) and triggers a HWASAN failure due to the initialization
528 // for A not having happened yet
529 //
530 // Even without interposition it is possible to run into similar situations in
531 // cases where two libraries mutually depend on each other.
532 //
533 // We only need one note per binary, so put everything for the note in a
534 // comdat. This needs to be a comdat with an .init_array section to prevent
535 // newer versions of lld from discarding the note.
536 //
537 // Create the note even if we aren't instrumenting globals. This ensures that
538 // binaries linked from object files with both instrumented and
539 // non-instrumented globals will end up with a note, even if a comdat from an
540 // object file with non-instrumented globals is selected. The note is harmless
541 // if the runtime doesn't support it, since it will just be ignored.
542 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
543
544 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
545 auto *Start =
546 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
547 nullptr, "__start_hwasan_globals");
548 Start->setVisibility(GlobalValue::HiddenVisibility);
549 auto *Stop =
550 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
551 nullptr, "__stop_hwasan_globals");
552 Stop->setVisibility(GlobalValue::HiddenVisibility);
553
554 // Null-terminated so actually 8 bytes, which are required in order to align
555 // the note properly.
556 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
557
558 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
559 Int32Ty, Int32Ty);
560 auto *Note =
561 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
563 Note->setSection(".note.hwasan.globals");
564 Note->setComdat(NoteComdat);
565 Note->setAlignment(Align(4));
566
567 // The pointers in the note need to be relative so that the note ends up being
568 // placed in rodata, which is the standard location for notes.
569 auto CreateRelPtr = [&](Constant *Ptr) {
573 Int32Ty);
574 };
575 Note->setInitializer(ConstantStruct::getAnon(
576 {ConstantInt::get(Int32Ty, 8), // n_namesz
577 ConstantInt::get(Int32Ty, 8), // n_descsz
578 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
579 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
581
582 // Create a zero-length global in hwasan_globals so that the linker will
583 // always create start and stop symbols.
584 auto *Dummy = new GlobalVariable(
585 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
586 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
587 Dummy->setSection("hwasan_globals");
588 Dummy->setComdat(NoteComdat);
589 Dummy->setMetadata(LLVMContext::MD_associated,
591 appendToCompilerUsed(M, Dummy);
592}
593
594/// Module-level initialization.
595///
596/// inserts a call to __hwasan_init to the module's constructor list.
597void HWAddressSanitizer::initializeModule() {
598 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
599 TargetTriple = Triple(M.getTargetTriple());
600
601 // x86_64 currently has two modes:
602 // - Intel LAM (default)
603 // - pointer aliasing (heap only)
604 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
605 UsePageAliases = shouldUsePageAliases(TargetTriple);
606 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
607 InstrumentStack = shouldInstrumentStack(TargetTriple);
608 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
609 PointerTagShift = IsX86_64 ? 57 : 56;
610 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
611
612 Mapping.init(TargetTriple, InstrumentWithCalls);
613
614 C = &(M.getContext());
615 IRBuilder<> IRB(*C);
616
617 HwasanCtorFunction = nullptr;
618
619 // Older versions of Android do not have the required runtime support for
620 // short granules, global or personality function instrumentation. On other
621 // platforms we currently require using the latest version of the runtime.
622 bool NewRuntime =
623 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
624
625 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
626 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
627 TargetTriple.isOSBinFormatELF() &&
628 !optOr(ClInlineAllChecks, Recover);
629
630 // These platforms may prefer less inlining to reduce binary size.
631 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
632 TargetTriple.isOSFuchsia()));
633
634 if (ClMatchAllTag.getNumOccurrences()) {
635 if (ClMatchAllTag != -1) {
636 MatchAllTag = ClMatchAllTag & 0xFF;
637 }
638 } else if (CompileKernel) {
639 MatchAllTag = 0xFF;
640 }
641 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
642
643 // If we don't have personality function support, fall back to landing pads.
644 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
645
646 InstrumentGlobals =
647 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
648
649 if (!CompileKernel) {
650 createHwasanCtorComdat();
651
652 if (InstrumentGlobals)
653 instrumentGlobals();
654
655 bool InstrumentPersonalityFunctions =
656 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
657 if (InstrumentPersonalityFunctions)
658 instrumentPersonalityFunctions();
659 }
660
661 if (!TargetTriple.isAndroid()) {
662 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
663 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
665 "__hwasan_tls", nullptr,
668 return GV;
669 });
670 ThreadPtrGlobal = cast<GlobalVariable>(C);
671 }
672}
673
674void HWAddressSanitizer::initializeCallbacks(Module &M) {
675 IRBuilder<> IRB(*C);
676 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
677 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
678 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
679 *HwasanMemsetFnTy;
680 if (UseMatchAllCallback) {
681 HwasanMemoryAccessCallbackSizedFnTy =
682 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
683 HwasanMemoryAccessCallbackFnTy =
684 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
685 HwasanMemTransferFnTy =
686 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
687 HwasanMemsetFnTy =
688 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
689 } else {
690 HwasanMemoryAccessCallbackSizedFnTy =
691 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
692 HwasanMemoryAccessCallbackFnTy =
693 FunctionType::get(VoidTy, {IntptrTy}, false);
694 HwasanMemTransferFnTy =
695 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
696 HwasanMemsetFnTy =
697 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
698 }
699
700 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
701 const std::string TypeStr = AccessIsWrite ? "store" : "load";
702 const std::string EndingStr = Recover ? "_noabort" : "";
703
704 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
705 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
706 HwasanMemoryAccessCallbackSizedFnTy);
707
708 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
709 AccessSizeIndex++) {
710 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
711 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
712 itostr(1ULL << AccessSizeIndex) +
713 MatchAllStr + EndingStr,
714 HwasanMemoryAccessCallbackFnTy);
715 }
716 }
717
718 const std::string MemIntrinCallbackPrefix =
719 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
720 ? std::string("")
722
723 HwasanMemmove = M.getOrInsertFunction(
724 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
725 HwasanMemcpy = M.getOrInsertFunction(
726 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
727 HwasanMemset = M.getOrInsertFunction(
728 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
729
730 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
731 PtrTy, Int8Ty, IntptrTy);
732 HwasanGenerateTagFunc =
733 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
734
735 HwasanRecordFrameRecordFunc =
736 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
737
738 ShadowGlobal =
739 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
740
741 HwasanHandleVfork =
742 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
743}
744
745Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
746 // An empty inline asm with input reg == output reg.
747 // An opaque no-op cast, basically.
748 // This prevents code bloat as a result of rematerializing trivial definitions
749 // such as constants or global addresses at every load and store.
750 InlineAsm *Asm =
751 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
752 StringRef(""), StringRef("=r,0"),
753 /*hasSideEffects=*/false);
754 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
755}
756
757Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
758 return getOpaqueNoopCast(IRB, ShadowGlobal);
759}
760
761Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
762 if (Mapping.Offset != kDynamicShadowSentinel)
763 return getOpaqueNoopCast(
765 ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));
766
767 if (Mapping.InGlobal)
768 return getDynamicShadowIfunc(IRB);
769
770 Value *GlobalDynamicAddress =
773 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
774}
775
776bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
777 Value *Ptr) {
778 // Do not instrument accesses from different address spaces; we cannot deal
779 // with them.
780 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
781 if (PtrTy->getPointerAddressSpace() != 0)
782 return true;
783
784 // Ignore swifterror addresses.
785 // swifterror memory addresses are mem2reg promoted by instruction
786 // selection. As such they cannot have regular uses like an instrumentation
787 // function and it makes no sense to track them as memory.
788 if (Ptr->isSwiftError())
789 return true;
790
791 if (findAllocaForValue(Ptr)) {
792 if (!InstrumentStack)
793 return true;
794 if (SSI && SSI->stackAccessIsSafe(*Inst))
795 return true;
796 }
797
798 if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
799 if (!InstrumentGlobals)
800 return true;
801 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
802 }
803
804 return false;
805}
806
807bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
808 Instruction *Inst, Value *Ptr) {
809 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
810 if (Ignored) {
811 ORE.emit(
812 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
813 } else {
814 ORE.emit([&]() {
815 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
816 });
817 }
818 return Ignored;
819}
820
821void HWAddressSanitizer::getInterestingMemoryOperands(
823 const TargetLibraryInfo &TLI,
825 // Skip memory accesses inserted by another instrumentation.
826 if (I->hasMetadata(LLVMContext::MD_nosanitize))
827 return;
828
829 // Do not instrument the load fetching the dynamic shadow address.
830 if (ShadowBase == I)
831 return;
832
833 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
834 if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
835 return;
836 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
837 LI->getType(), LI->getAlign());
838 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
839 if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
840 return;
841 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
842 SI->getValueOperand()->getType(), SI->getAlign());
843 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
844 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
845 return;
846 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
847 RMW->getValOperand()->getType(), std::nullopt);
848 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
849 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
850 return;
851 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
852 XCHG->getCompareOperand()->getType(),
853 std::nullopt);
854 } else if (auto *CI = dyn_cast<CallInst>(I)) {
855 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
856 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
857 ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
858 continue;
859 Type *Ty = CI->getParamByValType(ArgNo);
860 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
861 }
863 }
864}
865
867 if (LoadInst *LI = dyn_cast<LoadInst>(I))
868 return LI->getPointerOperandIndex();
869 if (StoreInst *SI = dyn_cast<StoreInst>(I))
870 return SI->getPointerOperandIndex();
871 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
872 return RMW->getPointerOperandIndex();
873 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
874 return XCHG->getPointerOperandIndex();
875 report_fatal_error("Unexpected instruction");
876 return -1;
877}
878
880 size_t Res = llvm::countr_zero(TypeSize / 8);
882 return Res;
883}
884
885void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
886 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
887 TargetTriple.isRISCV64())
888 return;
889
890 IRBuilder<> IRB(I);
891 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
892 Value *UntaggedPtr =
893 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
894 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
895}
896
897Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
898 // Mem >> Scale
899 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
900 if (Mapping.Offset == 0)
901 return IRB.CreateIntToPtr(Shadow, PtrTy);
902 // (Mem >> Scale) + Offset
903 return IRB.CreatePtrAdd(ShadowBase, Shadow);
904}
905
906int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
907 unsigned AccessSizeIndex) {
908 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
909 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
910 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
911 (Recover << HWASanAccessInfo::RecoverShift) |
912 (IsWrite << HWASanAccessInfo::IsWriteShift) |
913 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
914}
915
// Emit the inline fast path of a tag check for \p Ptr before \p InsertBefore:
// extract the pointer tag, load the shadow tag for the untagged address, and
// branch to a new unlikely-taken block on mismatch. The intermediate values
// are returned so the caller can populate the mismatch block (slow-path call
// or inline short-granule handling).
HWAddressSanitizer::ShadowTagCheckInfo
HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                         DomTreeUpdater &DTU, LoopInfo *LI) {
  ShadowTagCheckInfo R;

  IRBuilder<> IRB(InsertBefore);

  // Pointer as an integer, its tag byte (top bits), and the tag-free address.
  R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  R.PtrTag =
      IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
  R.AddrLong = untagPointer(IRB, R.PtrLong);
  // Load the tag recorded in shadow memory for this address.
  Value *Shadow = memToShadow(R.AddrLong, IRB);
  R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);

  // Pointers carrying the configured "match-all" tag are never reported.
  if (MatchAllTag.has_value()) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // Split off an unlikely-taken "then" block for the mismatch case; its
  // terminator is handed back to the caller as the insertion point.
  R.TagMismatchTerm = SplitBlockAndInsertIfThen(
      TagMismatch, InsertBefore, false,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  return R;
}
943
// Instrument a memory access with an outlined check: the comparison/report
// logic is encapsulated in a hwasan_check_memaccess* intrinsic, keeping the
// per-access code size small.
void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore,
                                                    DomTreeUpdater &DTU,
                                                    LoopInfo *LI) {
  // Outlined checks are incompatible with page-alias (x86-64) mode.
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  // Optionally emit the tag comparison inline and reach the outlined check
  // only on the (unlikely) mismatch path.
  if (InlineFastPath)
    InsertBefore =
        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;

  IRBuilder<> IRB(InsertBefore);
  // NOTE(review): a source line is elided here in this copy (presumably the
  // lookup of the enclosing Module used as `M` below) -- confirm upstream.
  bool useFixedShadowIntrinsic = false;
  // The memaccess fixed shadow intrinsic is only supported on AArch64,
  // which allows a 16-bit immediate to be left-shifted by 32.
  // Since kShadowBaseAlignment == 32, and Linux by default will not
  // mmap above 48-bits, practically any valid shadow offset is
  // representable.
  // In particular, an offset of 4TB (1024 << 32) is representable, and
  // ought to be good enough for anybody.
  if (TargetTriple.isAArch64() && Mapping.Offset != kDynamicShadowSentinel) {
    uint16_t offset_shifted = Mapping.Offset >> 32;
    // Exact round-trip means the offset fits the intrinsic's immediate form.
    useFixedShadowIntrinsic = (uint64_t)offset_shifted << 32 == Mapping.Offset;
  }

  if (useFixedShadowIntrinsic)
    IRB.CreateCall(
        // NOTE(review): the intrinsic-declaration lookup line is elided in
        // this copy of the source.
            M, UseShortGranules
                   ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
                   : Intrinsic::hwasan_check_memaccess_fixedshadow),
        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
         ConstantInt::get(Int64Ty, Mapping.Offset)});
  else
    // NOTE(review): the CreateCall/intrinsic-declaration line is elided in
    // this copy of the source.
            M, UseShortGranules
                   ? Intrinsic::hwasan_check_memaccess_shortgranules
                   : Intrinsic::hwasan_check_memaccess),
        {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
}
986
// Instrument a memory access with a fully inline check, including the
// short-granule handling and the architecture-specific trap sequence on the
// failure path.
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore,
                                                   DomTreeUpdater &DTU,
                                                   LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  // Fast path: pointer tag vs. shadow tag; the rest runs on mismatch only.
  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);

  // Shadow tags > 15 cannot be short-granule sizes, so any mismatch with
  // such a tag is an immediate failure.
  IRBuilder<> IRB(TCI.TagMismatchTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  // Short granule: the shadow byte holds the number of valid bytes in the
  // 16-byte granule; fail if the access reaches past that.
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
                            // NOTE(review): the branch-weights/&DTU argument
                            // line is elided in this copy of the source.
                            LI, CheckFailTerm->getParent());

  // For a short granule the real tag is stored in the granule's last byte;
  // compare the pointer tag against it.
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
                            // NOTE(review): the branch-weights/&DTU argument
                            // line is elided in this copy of the source.
                            LI, CheckFailTerm->getParent());

  // Failure path: raise a trap that encodes AccessInfo so the signal handler
  // can report the faulting access; the address is placed in a fixed reg.
  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
    // NOTE(review): the `Asm = InlineAsm::get(` line is elided in this copy.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
    // NOTE(review): the `Asm = InlineAsm::get(` line is elided in this copy.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  case Triple::riscv64:
    // The signal handler will find the data address in x10.
    // NOTE(review): the `Asm = InlineAsm::get(` line is elided in this copy.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "ebreak\naddiw x0, x11, " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x10}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, TCI.PtrLong);
  // In recoverable mode fall through back to the original code after the
  // trap instead of terminating the block.
  if (Recover)
    cast<BranchInst>(CheckFailTerm)
        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
}
1061
1062bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1063 MemIntrinsic *MI) {
1064 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1065 return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
1066 (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
1067 }
1068 if (isa<MemSetInst>(MI))
1069 return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
1070 return false;
1071}
1072
// Replace a mem intrinsic (memcpy/memmove/memset) with a call to the
// tag-aware hwasan runtime equivalent, then erase the original intrinsic.
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    // NOTE(review): the declaration line of `Args` is elided in this copy of
    // the source.
        MI->getOperand(0), MI->getOperand(1),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};

    // Match-all callbacks take the match-all tag as a trailing argument.
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
  } else if (isa<MemSetInst>(MI)) {
    // NOTE(review): the declaration line of `Args` is elided in this copy of
    // the source.
        MI->getOperand(0),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemset, Args);
  }
  MI->eraseFromParent();
}
1094
// Instrument one interesting memory operand. Small power-of-two accesses use
// the indexed callback / outlined / inline check depending on configuration;
// everything else falls back to the sized runtime callback. Returns true if
// instrumentation was emitted.
bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
                                             DomTreeUpdater &DTU,
                                             LoopInfo *LI) {
  Value *Addr = O.getPtr();

  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");

  // Masked loads/stores are not handled yet.
  if (O.MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(O.getInsn());
  // Fast path: fixed-size power-of-two access, small enough for the indexed
  // callbacks, and aligned so that it cannot straddle a granule boundary.
  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
       *O.Alignment >= O.TypeStoreSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
    if (InstrumentWithCalls) {
      // NOTE(review): the declaration line of `Args` is elided in this copy
      // of the source.
      if (UseMatchAllCallback)
        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                     Args);
    } else if (OutlinedChecks) {
      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                 DTU, LI);
    } else {
      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                DTU, LI);
    }
  } else {
    // Slow path: the byte size is computed at run time and handed to the
    // sized callback.
    // NOTE(review): the declaration line of `Args` is elided in this copy of
    // the source.
        IRB.CreatePointerCast(Addr, IntptrTy),
        IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
                       ConstantInt::get(IntptrTy, 8))};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
  }
  // Strip the tag from the original pointer operand where the target
  // requires it (see untagPointerOperand).
  untagPointerOperand(O.getInsn(), Addr);

  return true;
}
1137
1138void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1139 size_t Size) {
1140 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1141 if (!UseShortGranules)
1142 Size = AlignedSize;
1143
1144 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1145 if (InstrumentWithCalls) {
1146 IRB.CreateCall(HwasanTagMemoryFunc,
1147 {IRB.CreatePointerCast(AI, PtrTy), Tag,
1148 ConstantInt::get(IntptrTy, AlignedSize)});
1149 } else {
1150 size_t ShadowSize = Size >> Mapping.Scale;
1151 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1152 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1153 // If this memset is not inlined, it will be intercepted in the hwasan
1154 // runtime library. That's OK, because the interceptor skips the checks if
1155 // the address is in the shadow region.
1156 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1157 // llvm.memset right here into either a sequence of stores, or a call to
1158 // hwasan_tag_memory.
1159 if (ShadowSize)
1160 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
1161 if (Size != AlignedSize) {
1162 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1163 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1164 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1165 IRB.CreateStore(
1166 Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1167 AlignedSize - 1));
1168 }
1169 }
1170}
1171
1172unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1173 if (TargetTriple.getArch() == Triple::x86_64)
1174 return AllocaNo & TagMaskByte;
1175
1176 // A list of 8-bit numbers that have at most one run of non-zero bits.
1177 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1178 // masks.
1179 // The list does not include the value 255, which is used for UAR.
1180 //
1181 // Because we are more likely to use earlier elements of this list than later
1182 // ones, it is sorted in increasing order of probability of collision with a
1183 // mask allocated (temporally) nearby. The program that generated this list
1184 // can be found at:
1185 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1186 static const unsigned FastMasks[] = {
1187 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1188 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1189 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1190 return FastMasks[AllocaNo % std::size(FastMasks)];
1191}
1192
1193Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1194 if (TagMaskByte == 0xFF)
1195 return OldTag; // No need to clear the tag byte.
1196 return IRB.CreateAnd(OldTag,
1197 ConstantInt::get(OldTag->getType(), TagMaskByte));
1198}
1199
1200Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1201 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1202}
1203
// Return (and cache) the per-function base tag used for stack allocas, or
// null when each tag should instead come from a runtime call.
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  // NOTE(review): the guarding condition line is elided in this copy of the
  // source (presumably `if (ClGenerateTagsWithCalls)`) -- confirm upstream.
    return nullptr;
  if (StackBaseTag)
    return StackBaseTag;
  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *FramePointerLong = getCachedFP(IRB);
  Value *StackTag =
      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
                                      IRB.CreateLShr(FramePointerLong, 20)));
  StackTag->setName("hwasan.stack.base.tag");
  return StackTag;
}
1219
// Derive the tag for the \p AllocaNo'th alloca from the function's base
// stack tag (XOR with the per-alloca retag mask).
Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        unsigned AllocaNo) {
  // NOTE(review): the guarding condition line is elided in this copy of the
  // source (presumably `if (ClGenerateTagsWithCalls)`) -- confirm upstream.
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(
      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
}
1227
1228Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1229 Value *FramePointerLong = getCachedFP(IRB);
1230 Value *UARTag =
1231 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1232
1233 UARTag->setName("hwasan.uar.tag");
1234 return UARTag;
1235}
1236
1237// Add a tag to an address.
1238Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1239 Value *PtrLong, Value *Tag) {
1240 assert(!UsePageAliases);
1241 Value *TaggedPtrLong;
1242 if (CompileKernel) {
1243 // Kernel addresses have 0xFF in the most significant byte.
1244 Value *ShiftedTag =
1245 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1246 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1247 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1248 } else {
1249 // Userspace can simply do OR (tag << PointerTagShift);
1250 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1251 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1252 }
1253 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1254}
1255
1256// Remove tag from an address.
1257Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1258 assert(!UsePageAliases);
1259 Value *UntaggedPtrLong;
1260 if (CompileKernel) {
1261 // Kernel addresses have 0xFF in the most significant byte.
1262 UntaggedPtrLong =
1263 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1264 TagMaskByte << PointerTagShift));
1265 } else {
1266 // Userspace addresses have 0x00.
1267 UntaggedPtrLong = IRB.CreateAnd(
1268 PtrLong, ConstantInt::get(PtrLong->getType(),
1269 ~(TagMaskByte << PointerTagShift)));
1270 }
1271 return UntaggedPtrLong;
1272}
1273
1274Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1275 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1276 // in Bionic's libc/platform/bionic/tls_defines.h.
1277 constexpr int SanitizerSlot = 6;
1278 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1279 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1280 return ThreadPtrGlobal;
1281}
1282
1283Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1284 if (!CachedFP)
1285 CachedFP = memtag::getFP(IRB);
1286 return CachedFP;
1287}
1288
1289Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1290 // Prepare ring buffer data.
1291 Value *PC = memtag::getPC(TargetTriple, IRB);
1292 Value *FP = getCachedFP(IRB);
1293
1294 // Mix FP and PC.
1295 // Assumptions:
1296 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1297 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1298 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1299 // 0xFFFFPPPPPPPPPPPP
1300 //
1301 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1302 // prefer FP-relative offsets for functions compiled with HWASan.
1303 FP = IRB.CreateShl(FP, 44);
1304 return IRB.CreateOr(PC, FP);
1305}
1306
// Emit the per-function prologue: establish the shadow base and, when
// requested, record a frame entry (mixed PC/FP) in the per-thread stack
// history ring buffer.
void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.InTls)
    ShadowBase = getShadowNonTls(IRB);
  else if (!WithFrameRecord && TargetTriple.isAndroid())
    ShadowBase = getDynamicShadowIfunc(IRB);

  // Nothing else to do if no frame record is needed and the base is known.
  if (!WithFrameRecord && ShadowBase)
    return;

  Value *SlotPtr = nullptr;
  Value *ThreadLong = nullptr;
  Value *ThreadLongMaybeUntagged = nullptr;

  // Lazily load the thread slot; on AArch64 TBI makes untagging unnecessary.
  auto getThreadLongMaybeUntagged = [&]() {
    if (!SlotPtr)
      SlotPtr = getHwasanThreadSlotPtr(IRB);
    if (!ThreadLong)
      ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
    // Extract the address field from ThreadLong. Unnecessary on AArch64 with
    // TBI.
    return TargetTriple.isAArch64() ? ThreadLong
                                    : untagPointer(IRB, ThreadLong);
  };

  if (WithFrameRecord) {
    switch (ClRecordStackHistory) {
    case libcall: {
      // Emit a runtime call into hwasan rather than emitting instructions for
      // recording stack history.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
      break;
    }
    case instr: {
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

      // Store data to ring buffer.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      Value *RecordPtr =
          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
      IRB.CreateStore(FrameRecordInfo, RecordPtr);

      // Update the ring buffer. Top byte of ThreadLong defines the size of the
      // buffer in pages, it must be a power of two, and the start of the buffer
      // must be aligned by twice that much. Therefore wrap around of the ring
      // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
      // The use of AShr instead of LShr is due to
      // https://bugs.llvm.org/show_bug.cgi?id=39030
      // Runtime library makes sure not to use the highest bit.
      //
      // Mechanical proof of this address calculation can be found at:
      // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/prove_hwasanwrap.smt2
      //
      // Example of the wrap case for N = 1
      // Pointer:   0x01AAAAAAAAAAAFF8
      //                      +
      //            0x0000000000000008
      //                      =
      //            0x01AAAAAAAAAAB000
      //                      &
      // WrapMask:  0xFFFFFFFFFFFFF000
      //                      =
      //            0x01AAAAAAAAAAA000
      //
      // Then the WrapMask will be a no-op until the next wrap case.
      Value *WrapMask = IRB.CreateXor(
          IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
          ConstantInt::get(IntptrTy, (uint64_t)-1));
      Value *ThreadLongNew = IRB.CreateAnd(
          IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
      IRB.CreateStore(ThreadLongNew, SlotPtr);
      break;
    }
    case none: {
      // NOTE(review): the `llvm_unreachable(` line is elided in this copy of
      // the source.
          "A stack history recording mode should've been selected.");
    }
    }
  }

  if (!ShadowBase) {
    if (!ThreadLongMaybeUntagged)
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

    // Get shadow base address by aligning RecordPtr up.
    // Note: this is not correct if the pointer is already aligned.
    // Runtime library will make sure this never happens.
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(
            ThreadLongMaybeUntagged,
            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
  }
}
1404
// After each landing pad, call __hwasan_handle_vfork with the current stack
// pointer so the runtime can retag stack memory skipped by the unwinder.
bool HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNonDebugInstruction());
    IRB.CreateCall(
        HwasanHandleVfork,
        // NOTE(review): the argument line (presumably a memtag::readRegister
        // call) is elided in this copy of the source.
        IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
  }
  return true;
}
1416
// Tag every interesting alloca: rewrite its uses to a tagged address, write
// the tag to shadow at lifetime start (or function entry), and retag with
// the UAR tag at lifetime end / every function exit.
bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    // NOTE(review): the IRBuilder construction line is elided in this copy
    // of the source.

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    Value *AICast = IRB.CreatePointerCast(AI, PtrTy);

    auto HandleLifetime = [&](IntrinsicInst *II) {
      // Set the lifetime intrinsic to cover the whole alloca. This reduces the
      // set of assumptions we need to make about the lifetime. Without this we
      // would need to ensure that we can track the lifetime pointer to a
      // constant offset from the alloca, and would still need to change the
      // size to include the extra alignment we use for the untagging to make
      // the size consistent.
      //
      // The check for standard lifetime below makes sure that we have exactly
      // one set of start / end in any execution (i.e. the ends are not
      // reachable from each other), so this will not cause any problems.
      II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
      II->setArgOperand(1, AICast);
    };
    llvm::for_each(Info.LifetimeStart, HandleLifetime);
    llvm::for_each(Info.LifetimeEnd, HandleLifetime);

    // Keep the cast/int forms (and, per the elided predicate, presumably
    // lifetime intrinsics) pointing at the original alloca.
    AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && User != AICast &&
          // NOTE(review): the final conjunct line is elided in this copy of
          // the source.
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    bool StandardLifetime =
        !SInfo.CallsReturnTwice &&
        SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
                                   &LI, ClMaxLifetimes);
    if (DetectUseAfterScope && StandardLifetime) {
      // Tag at lifetime.start and retag at each reachable lifetime.end/exit.
      IntrinsicInst *Start = Info.LifetimeStart[0];
      IRB.SetInsertPoint(Start->getNextNode());
      tagAlloca(IRB, AI, Tag, Size);
      if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      // Tag at entry and retag at every return.
      tagAlloca(IRB, AI, Tag, Size);
      for (auto *RI : SInfo.RetVec)
        TagEnd(RI);
      // We inserted tagging outside of the lifetimes, so we have to remove
      // them.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
  for (auto &I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();
  return true;
}
1517
                       bool Skip) {
  // Record whether this function was skipped or instrumented as an
  // optimization remark, so selective instrumentation can be audited.
  if (Skip) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
             << "Skipped: F=" << ore::NV("Function", &F);
    });
  } else {
    ORE.emit([&]() {
      // NOTE(review): the "Sanitized" message is reported via
      // OptimizationRemarkMissed -- confirm this asymmetry is intentional
      // before changing it.
      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
             << "Sanitized: F=" << ore::NV("Function", &F);
    });
  }
}
1532
// Decide whether to skip instrumenting a function entirely, either because
// profile data marks it hot (above the configured percentile cutoff) or by
// random sampling.
bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
    // NOTE(review): the parameter list line is elided in this copy of the
    // source.
  auto SkipHot = [&]() {
    // Only active when a hot-percentile cutoff was given on the command line.
    if (!ClHotPercentileCutoff.getNumOccurrences())
      return false;
    // NOTE(review): the module-analysis proxy lookup line is elided in this
    // copy of the source.
    ProfileSummaryInfo *PSI =
        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
    // Without a profile summary we cannot classify hotness; count and keep.
    if (!PSI || !PSI->hasProfileSummary()) {
      ++NumNoProfileSummaryFuncs;
      return false;
    }
    return PSI->isFunctionHotInCallGraphNthPercentile(
    // NOTE(review): the argument line is elided in this copy of the source.
  };

  auto SkipRandom = [&]() {
    // Only active when a random skip rate was given on the command line.
    if (!ClRandomSkipRate.getNumOccurrences())
      return false;
    std::bernoulli_distribution D(ClRandomSkipRate);
    return !D(*Rng);
  };

  bool Skip = SkipRandom() || SkipHot();
  // NOTE(review): a line is elided here in this copy of the source
  // (presumably the emitRemark call).
  return Skip;
}
1560
// Main per-function driver: collect interesting operands, allocas, landing
// pads and mem intrinsics, emit the prologue, and instrument each of them.
void HWAddressSanitizer::sanitizeFunction(Function &F,
    // NOTE(review): the parameter list line is elided in this copy of the
    // source.
  // Never instrument the module ctor this pass itself created.
  if (&F == HwasanCtorFunction)
    return;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return;

  if (F.empty())
    return;

  NumTotalFuncs++;

  // NOTE(review): lines are elided here in this copy of the source
  // (presumably the ORE analysis lookup).

  if (selectiveInstrumentationShouldSkip(F, FAM))
    return;

  NumInstrumentedFuncs++;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> LandingPadVec;
  // NOTE(review): a line is elided here in this copy of the source.

  // Single pass over the function to gather everything to instrument.
  memtag::StackInfoBuilder SIB(SSI);
  for (auto &Inst : instructions(F)) {
    if (InstrumentStack) {
      SIB.visit(Inst);
    }

    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
      LandingPadVec.push_back(&Inst);

    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
      if (!ignoreMemIntrinsic(ORE, MI))
        IntrinToInstrument.push_back(MI);
  }

  memtag::StackInfo &SInfo = SIB.get();

  initializeCallbacks(*F.getParent());

  if (!LandingPadVec.empty())
    instrumentLandingPads(LandingPadVec);

  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
  }

  // Nothing left to instrument.
  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return;

  assert(!ShadowBase);

  // Remove memory attributes that are about to become invalid.
  // HWASan checks read from shadow, which invalidates memory(argmem: *)
  // Short granule checks on function arguments read from the argument memory
  // (last byte of the granule), which invalidates writeonly.
  F.removeFnAttr(llvm::Attribute::Memory);
  for (auto &A : F.args())
    A.removeAttr(llvm::Attribute::WriteOnly);

  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory != none &&
                   Mapping.WithFrameRecord &&
                   !SInfo.AllocasToInstrument.empty());

  if (!SInfo.AllocasToInstrument.empty()) {
    // NOTE(review): lines are elided here in this copy of the source
    // (presumably the DominatorTree/PostDominatorTree analysis lookups).
    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
    Value *StackTag = getStackBaseTag(EntryIRB);
    Value *UARTag = getUARTag(EntryIRB);
    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = F.getEntryBlock().begin();
    for (Instruction &I :
         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
      if (auto *AI = dyn_cast<AllocaInst>(&I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I.moveBefore(F.getEntryBlock(), InsertPt);
    }
  }

  // NOTE(review): lines are elided here in this copy of the source
  // (presumably the cached DT/PDT/LI pointers used below).
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand, DTU, LI);
  DTU.flush();

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto *Inst : IntrinToInstrument)
      instrumentMemIntrinsic(Inst);
  }

  // Per-function cached state must not leak into the next function.
  ShadowBase = nullptr;
  StackBaseTag = nullptr;
  CachedFP = nullptr;
}
1679
// Tag a global: create a padded replacement global, emit hwasan descriptor
// records for the runtime, and alias the old name to the tagged address.
void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  assert(!UsePageAliases);
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    // NOTE(review): the construction of `Padding` is elided in this copy of
    // the source.
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short granule
  // tags would discriminate two globals with different tags, but there would
  // otherwise be nothing stopping such a global from being incorrectly ICF'd
  // with an uninstrumented (i.e. tag 0) global that happened to have the short
  // granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  // bytes 0-3: relative address of global
  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
  // it isn't, we create multiple descriptors)
  // byte 7: tag
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    auto *GVRelPtr = ConstantExpr::getTrunc(
        // NOTE(review): lines are elided here in this copy of the source
        // (presumably ConstantExpr::getAdd/getSub of the two addresses).
            ConstantExpr::getPtrToInt(NewGV, Int64Ty),
            ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
            ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    // Tie the descriptor's liveness to the global it describes.
    Descriptor->setMetadata(LLVMContext::MD_associated,
    // NOTE(review): the MDNode argument line is elided in this copy of the
    // source.
    appendToCompilerUsed(M, Descriptor);
  }

  // The old name becomes an alias to the tagged address of the new global.
  // NOTE(review): lines are elided here in this copy of the source
  // (presumably the Aliasee = ConstantExpr::getIntToPtr(getAdd( lines).
      ConstantExpr::getPtrToInt(NewGV, Int64Ty),
      ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}
1756
// Collect all taggable globals in the module and tag each one, using a
// deterministic per-module starting tag derived from the source file name.
void HWAddressSanitizer::instrumentGlobals() {
  std::vector<GlobalVariable *> Globals;
  for (GlobalVariable &GV : M.globals()) {
    // NOTE(review): the guarding condition line is elided in this copy of
    // the source (presumably a sanitizer-metadata / no-hwaddress check).
      continue;

    if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
        GV.isThreadLocal())
      continue;

    // Common symbols can't have aliases point to them, so they can't be tagged.
    if (GV.hasCommonLinkage())
      continue;

    // Globals with custom sections may be used in __start_/__stop_ enumeration,
    // which would be broken both by adding tags and potentially by the extra
    // padding/alignment that we insert.
    if (GV.hasSection())
      continue;

    Globals.push_back(&GV);
  }

  // Seed the tag sequence from an MD5 of the source file name so builds are
  // deterministic but differ across files.
  MD5 Hasher;
  Hasher.update(M.getSourceFileName());
  MD5::MD5Result Hash;
  Hasher.final(Hash);
  uint8_t Tag = Hash[0];

  assert(TagMaskByte >= 16);

  for (GlobalVariable *GV : Globals) {
    // Don't allow globals to be tagged with something that looks like a
    // short-granule tag, otherwise we lose inter-granule overflow detection, as
    // the fast path shadow-vs-address check succeeds.
    if (Tag < 16 || Tag > TagMaskByte)
      Tag = 16;
    instrumentGlobal(GV, Tag++);
  }
}
1797
void HWAddressSanitizer::instrumentPersonalityFunctions() {
  // We need to untag stack frames as we unwind past them. That is the job of
  // the personality function wrapper, which either wraps an existing
  // personality function or acts as a personality function on its own. Each
  // function that has a personality function or that can be unwound past has
  // its personality function changed to a thunk that calls the personality
  // function wrapper in the runtime.
  // NOTE(review): the declaration of `PersonalityFns` (a map from personality
  // constant to the functions using it) is elided in this copy of the source.
  for (Function &F : M) {
    if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
      continue;

    if (F.hasPersonalityFn()) {
      PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
    } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
      // Unwindable function with no personality: group under the null key so
      // it gets the wrapper acting as a personality on its own.
      PersonalityFns[nullptr].push_back(&F);
    }
  }

  if (PersonalityFns.empty())
    return;

  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
      "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
      PtrTy, PtrTy, PtrTy, PtrTy);
  // These two are only passed through as opaque pointers to the wrapper.
  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);

  // Build one thunk per distinct personality function.
  for (auto &P : PersonalityFns) {
    std::string ThunkName = kHwasanPersonalityThunkName;
    if (P.first)
      ThunkName += ("." + P.first->getName()).str();
    FunctionType *ThunkFnTy = FunctionType::get(
        Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
    bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
                               cast<GlobalValue>(P.first)->hasLocalLinkage());
    auto *ThunkFn = Function::Create(ThunkFnTy,
    // NOTE(review): the linkage-selection argument lines are elided in this
    // copy of the source.
                                     ThunkName, &M);
    if (!IsLocal) {
      // Deduplicate identical thunks across TUs via hidden comdat.
      ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
      ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
    }

    auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
    IRBuilder<> IRB(BB);
    CallInst *WrapperCall = IRB.CreateCall(
        HwasanPersonalityWrapper,
        {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
         ThunkFn->getArg(3), ThunkFn->getArg(4),
         P.first ? P.first : Constant::getNullValue(PtrTy),
         UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
    WrapperCall->setTailCall();
    IRB.CreateRet(WrapperCall);

    for (Function *F : P.second)
      F->setPersonalityFn(ThunkFn);
  }
}
1858
// Select the shadow mapping scheme (scale, offset, ifunc/TLS access, frame
// recording) from the target OS and the command-line configuration.
void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
                                             bool InstrumentWithCalls) {
  Scale = kDefaultShadowScale;
  if (TargetTriple.isOSFuchsia()) {
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    InGlobal = false;
    InTls = false;
    Offset = 0;
    WithFrameRecord = true;
  } else if (ClMappingOffset.getNumOccurrences() > 0) {
    // Explicit offset from the command line.
    InGlobal = false;
    InTls = false;
    // NOTE(review): the Offset assignment line is elided in this copy of the
    // source (presumably Offset = ClMappingOffset).
    WithFrameRecord = false;
  } else if (ClEnableKhwasan || InstrumentWithCalls) {
    // Kernel hwasan / callback mode: zero-based shadow, no frame records.
    InGlobal = false;
    InTls = false;
    Offset = 0;
    WithFrameRecord = false;
  } else if (ClWithIfunc) {
    // Shadow base resolved through an ifunc global.
    InGlobal = true;
    InTls = false;
    // NOTE(review): the Offset assignment line is elided in this copy of the
    // source (presumably Offset = kDynamicShadowSentinel).
    WithFrameRecord = false;
  } else if (ClWithTls) {
    // Shadow base kept in TLS; enables frame recording.
    InGlobal = false;
    InTls = true;
    // NOTE(review): the Offset assignment line is elided in this copy of the
    // source (presumably Offset = kDynamicShadowSentinel).
    WithFrameRecord = true;
  } else {
    InGlobal = false;
    InTls = false;
    // NOTE(review): the Offset assignment line is elided in this copy of the
    // source (presumably Offset = kDynamicShadowSentinel).
    WithFrameRecord = false;
  }
}
static cl::opt< size_t > ClMaxLifetimes("stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static const uint64_t kDynamicShadowSentinel
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define clEnumVal(ENUMVAL, DESC)
Definition: CommandLine.h:684
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1309
bool End
Definition: ELF_riscv.cpp:480
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static cl::opt< float > ClRandomSkipRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function."))
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static const size_t kDefaultShadowScale
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cutoff."))
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static const uint64_t kDynamicShadowSentinel
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClWithTls("hwasan-with-tls", cl::desc("Access dynamic shadow through a thread-local pointer on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClWithIfunc("hwasan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(false))
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This file contains some functions that are useful when dealing with strings.
an instruction to allocate memory on the stack
Definition: Instructions.h:61
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:424
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:635
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition: Constants.h:706
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2281
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2618
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2267
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2611
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2253
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition: Constants.h:477
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:172
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:550
MaybeAlign getAlign() const
Returns the alignment of the given variable or function.
Definition: GlobalObject.h:80
void setComdat(Comdat *C)
Definition: Globals.cpp:206
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:110
const SanitizerMetadata & getSanitizerMetadata() const
Definition: Globals.cpp:237
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:263
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:248
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
bool hasSanitizerMetadata() const
Definition: GlobalValue.h:355
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:52
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:55
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:481
Analysis pass providing a never-invalidated alias analysis result.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1896
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2190
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:592
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:105
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1454
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1996
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1112
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1395
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2265
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2269
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1807
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1433
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2041
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1492
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1820
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2027
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1514
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:566
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2273
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2216
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1473
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1536
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2686
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:563
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
Definition: MD5.h:41
void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition: MD5.cpp:189
void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition: MD5.cpp:234
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:47
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1542
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool empty() const
Definition: MapVector.h:79
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition: Module.cpp:223
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:688
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:215
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:361
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:771
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:769
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition: Triple.h:989
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:911
bool isOSFuchsia() const
Definition: Triple.h:588
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:719
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:501
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
int getNumOccurrences() const
Definition: CommandLine.h:399
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition: ELF.h:1733
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
Value * getFP(IRBuilder<> &IRB)
bool isStandardLifetime(const SmallVectorImpl< IntrinsicInst * > &LifetimeStart, const SmallVectorImpl< IntrinsicInst * > &LifetimeEnd, const DominatorTree *DT, const LoopInfo *LI, size_t MaxLifetimes)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const Instruction *Start, const SmallVectorImpl< IntrinsicInst * > &Ends, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
bool isLifetimeIntrinsic(Value *V)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1715
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:296
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:4103
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 4 > UnrecognizedLifetimes
SmallVector< Instruction *, 8 > RetVec