//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, a basic address correctness
/// checker based on tagged addressing.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/RandomNumberGenerator.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <optional>
#include <random>

using namespace llvm;

#define DEBUG_TYPE "hwasan"

const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
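// The callback index for an access is countr_zero of its byte size (see
// TypeSizeToSizeIndex below): 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4.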

static const size_t kDefaultShadowScale = 4;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();

static const unsigned kShadowBaseAlignment = 32;

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__hwasan_"));

static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
    "hwasan-kernel-mem-intrinsic-prefix",
    cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentWithCalls(
    "hwasan-instrument-with-calls",
    cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("hwasan-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
                                       cl::desc("instrument byval arguments"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecover("hwasan-recover",
              cl::desc("Enable recovery mode (continue-after-error)."),
              cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::Hidden, cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<size_t> ClMaxLifetimes(
    "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
    cl::ReallyHidden,
    cl::desc("How many lifetime ends to handle for a single alloca."),
    cl::Optional);

static cl::opt<bool>
    ClUseAfterScope("hwasan-use-after-scope",
                    cl::desc("detect use after scope within function"),
                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
                               cl::Hidden, cl::init(false));

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool>
    ClEnableKhwasan("hwasan-kernel",
                    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
                    cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset
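//
// For illustration, with the default scale of 4 (kDefaultShadowScale) each
// 16-byte granule of memory is described by one shadow byte, so an address
// Mem is checked against the shadow byte at (Mem >> 4) + offset; e.g.
// Mem == 0x1000 maps to shadow byte offset + 0x100.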

static cl::opt<uint64_t>
    ClMappingOffset("hwasan-mapping-offset",
                    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithTls(
    "hwasan-with-tls",
    cl::desc("Access dynamic shadow through a thread-local pointer on "
             "platforms that support this"),
    cl::Hidden, cl::init(true));

static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
                                          cl::desc("Hot percentile cutoff."));

static cl::opt<float>
    ClRandomSkipRate("hwasan-random-rate",
                     cl::desc("Probability value in the range [0.0, 1.0] "
                              "of keeping instrumentation of a function."));

STATISTIC(NumTotalFuncs, "Number of total funcs");
STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");

// Mode for selecting how to insert frame record info into the stack ring
// buffer.
enum RecordStackHistoryMode {
  // Do not record frame record info.
  none,

  // Insert instructions into the prologue for storing into the stack ring
  // buffer directly.
  instr,

  // Add a call to __hwasan_add_frame_record in the runtime.
  libcall,
};

static cl::opt<RecordStackHistoryMode> ClRecordStackHistory(
    "hwasan-record-stack-history",
    cl::desc("Record stack frames with tagged allocations in a thread-local "
             "ring buffer"),
    cl::values(clEnumVal(none, "Do not record stack ring history"),
               clEnumVal(instr, "Insert instructions into the prologue for "
                                "storing into the stack ring buffer directly"),
               clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
                                  "storing into the stack ring buffer")),
    cl::Hidden, cl::init(instr));

static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentLandingPads("hwasan-instrument-landing-pads",
                            cl::desc("instrument landing pads"), cl::Hidden,
                            cl::init(false));

static cl::opt<bool> ClUseShortGranules(
    "hwasan-use-short-granules",
    cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentPersonalityFunctions(
    "hwasan-instrument-personality-functions",
    cl::desc("instrument personality functions"), cl::Hidden);

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
                                            cl::desc("inline fast path checks"),
                                            cl::Hidden, cl::init(false));

// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
                                      cl::desc("Use page aliasing in HWASan"),
                                      cl::Hidden, cl::init(false));

namespace {

template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
  return Opt.getNumOccurrences() ? Opt : Other;
}

bool shouldUsePageAliases(const Triple &TargetTriple) {
  return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
}

bool shouldInstrumentStack(const Triple &TargetTriple) {
  return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
}

bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
  return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
}

bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
  return optOr(ClUseStackSafety, !DisableOptimization);
}

bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
                                  bool DisableOptimization) {
  return shouldInstrumentStack(TargetTriple) &&
         mightUseStackSafetyAnalysis(DisableOptimization);
}

bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
  return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
}

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
                     const StackSafetyGlobalInfo *SSI)
      : M(M), SSI(SSI) {
    this->Recover = optOr(ClRecover, Recover);
    this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
    this->Rng = ClRandomSkipRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
                                                     : nullptr;

    initializeModule();
  }

  void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);

private:
  struct ShadowTagCheckInfo {
    Instruction *TagMismatchTerm = nullptr;
    Value *PtrLong = nullptr;
    Value *AddrLong = nullptr;
    Value *PtrTag = nullptr;
    Value *MemTag = nullptr;
  };

  bool selectiveInstrumentationShouldSkip(Function &F,
                                          FunctionAnalysisManager &FAM) const;
  void initializeModule();
  void createHwasanCtorComdat();

  void initializeCallbacks(Module &M);

  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);

  int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
  ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                          DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                  unsigned AccessSizeIndex,
                                  Instruction *InsertBefore,
                                  DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore, DomTreeUpdater &DTU,
                                 LoopInfo *LI);
  bool ignoreMemIntrinsic(MemIntrinsic *MI);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
                           LoopInfo *LI);
  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, const TargetLibraryInfo &TLI,
      SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
                       const DominatorTree &DT, const PostDominatorTree &PDT,
                       const LoopInfo &LI);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
  Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
  unsigned retagMask(unsigned AllocaNo);

  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
  void instrumentGlobals();

  Value *getCachedFP(IRBuilder<> &IRB);
  Value *getFrameRecordInfo(IRBuilder<> &IRB);

  void instrumentPersonalityFunctions();

  LLVMContext *C;
  Module &M;
  const StackSafetyGlobalInfo *SSI;
  Triple TargetTriple;
  std::unique_ptr<RandomNumberGenerator> Rng;

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  ///
  /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
  /// ring buffer for storing stack allocations on targets that support it.
  struct ShadowMapping {
    uint8_t Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;
    bool WithFrameRecord;

    void init(Triple &TargetTriple, bool InstrumentWithCalls);
    Align getObjectAlignment() const { return Align(1ULL << Scale); }
  };
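
  // Illustrative note: with Scale == kDefaultShadowScale (4),
  // getObjectAlignment() is Align(16), i.e. one shadow byte covers a 16-byte
  // granule and tagged objects are padded out to that granularity.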

  ShadowMapping Mapping;

  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int64Ty = Type::getInt64Ty(M.getContext());

  bool CompileKernel;
  bool Recover;
  bool OutlinedChecks;
  bool InlineFastPath;
  bool UseShortGranules;
  bool InstrumentLandingPads;
  bool InstrumentWithCalls;
  bool InstrumentStack;
  bool InstrumentGlobals;
  bool DetectUseAfterScope;
  bool UsePageAliases;
  bool UseMatchAllCallback;

  std::optional<uint8_t> MatchAllTag;

  unsigned PointerTagShift;
  uint64_t TagMaskByte;

  Function *HwasanCtorFunction;

  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
  FunctionCallee HwasanHandleVfork;

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanRecordFrameRecordFunc;

  Constant *ShadowGlobal;

  Value *ShadowBase = nullptr;
  Value *StackBaseTag = nullptr;
  Value *CachedFP = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};

} // end anonymous namespace

PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
                                              ModuleAnalysisManager &MAM) {
  const StackSafetyGlobalInfo *SSI = nullptr;
  auto TargetTriple = llvm::Triple(M.getTargetTriple());
  if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
    SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);

  HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M)
    HWASan.sanitizeFunction(F, FAM);

  PreservedAnalyses PA = PreservedAnalyses::none();
  // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
  // are incrementally updated throughout this pass whenever
  // SplitBlockAndInsertIfThen is called.
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

void HWAddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.Recover)
    OS << "recover";
  OS << '>';
}

void HWAddressSanitizer::createHwasanCtorComdat() {
  std::tie(HwasanCtorFunction, std::ignore) =
      getOrCreateSanitizerCtorAndInitFunctions(
          M, kHwasanModuleCtorName, kHwasanInitName,
          /*InitArgTypes=*/{},
          /*InitArgs=*/{},
          // This callback is invoked when the functions are created the first
          // time. Hook them into the global ctors list in that case:
          [&](Function *Ctor, FunctionCallee) {
            Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
            Ctor->setComdat(CtorComdat);
            appendToGlobalCtors(M, Ctor, 0, Ctor);
          });

  // Create a note that contains pointers to the list of global
  // descriptors. Adding a note to the output file will cause the linker to
  // create a PT_NOTE program header pointing to the note that we can use to
  // find the descriptor list starting from the program headers. A function
  // provided by the runtime initializes the shadow memory for the globals by
  // accessing the descriptor list via the note. The dynamic loader needs to
  // call this function whenever a library is loaded.
  //
  // The reason why we use a note for this instead of a more conventional
  // approach of having a global constructor pass a descriptor list pointer to
  // the runtime is because of an order of initialization problem. With
  // constructors we can encounter the following problematic scenario:
  //
  // 1) library A depends on library B and also interposes one of B's symbols
  // 2) B's constructors are called before A's (as required for correctness)
  // 3) during construction, B accesses one of its "own" globals (actually
  //    interposed by A) and triggers a HWASAN failure due to the initialization
  //    for A not having happened yet
  //
  // Even without interposition it is possible to run into similar situations
  // in cases where two libraries mutually depend on each other.
  //
  // We only need one note per binary, so put everything for the note in a
  // comdat. This needs to be a comdat with an .init_array section to prevent
  // newer versions of lld from discarding the note.
  //
  // Create the note even if we aren't instrumenting globals. This ensures that
  // binaries linked from object files with both instrumented and
  // non-instrumented globals will end up with a note, even if a comdat from an
  // object file with non-instrumented globals is selected. The note is harmless
  // if the runtime doesn't support it, since it will just be ignored.
  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);

  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
  auto *Start =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__start_hwasan_globals");
  Start->setVisibility(GlobalValue::HiddenVisibility);
  auto *Stop =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__stop_hwasan_globals");
  Stop->setVisibility(GlobalValue::HiddenVisibility);

  // Null-terminated so actually 8 bytes, which are required in order to align
  // the note properly.
  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");

  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
                                 Int32Ty, Int32Ty);
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
  Note->setSection(".note.hwasan.globals");
  Note->setComdat(NoteComdat);
  Note->setAlignment(Align(4));

  // The pointers in the note need to be relative so that the note ends up being
  // placed in rodata, which is the standard location for notes.
  auto CreateRelPtr = [&](Constant *Ptr) {
    return ConstantExpr::getTrunc(
        ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
                             ConstantExpr::getPtrToInt(Note, Int64Ty)),
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8),                           // n_namesz
       ConstantInt::get(Int32Ty, 8),                           // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
  appendToCompilerUsed(M, Note);

  // Create a zero-length global in hwasan_globals so that the linker will
  // always create start and stop symbols.
  auto *Dummy = new GlobalVariable(
      M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
  Dummy->setSection("hwasan_globals");
  Dummy->setComdat(NoteComdat);
  Dummy->setMetadata(LLVMContext::MD_associated,
                     MDNode::get(*C, ValueAsMetadata::get(Note)));
  appendToCompilerUsed(M, Dummy);
}

/// Module-level initialization.
///
/// Inserts a call to __hwasan_init to the module's constructor list.
void HWAddressSanitizer::initializeModule() {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  TargetTriple = Triple(M.getTargetTriple());

  // x86_64 currently has two modes:
  // - Intel LAM (default)
  // - pointer aliasing (heap only)
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  UsePageAliases = shouldUsePageAliases(TargetTriple);
  InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
  InstrumentStack = shouldInstrumentStack(TargetTriple);
  DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
  PointerTagShift = IsX86_64 ? 57 : 56;
  TagMaskByte = IsX86_64 ? 0x3F : 0xFF;

  Mapping.init(TargetTriple, InstrumentWithCalls);

  C = &(M.getContext());
  IRBuilder<> IRB(*C);

  HwasanCtorFunction = nullptr;

  // Older versions of Android do not have the required runtime support for
  // short granules, global or personality function instrumentation. On other
  // platforms we currently require using the latest version of the runtime.
  bool NewRuntime =
      !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);

  UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
  OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
                   TargetTriple.isOSBinFormatELF() &&
                   !optOr(ClInlineAllChecks, Recover);

  // These platforms may prefer less inlining to reduce binary size.
  InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
                                                   TargetTriple.isOSFuchsia()));

  if (ClMatchAllTag.getNumOccurrences()) {
    if (ClMatchAllTag != -1) {
      MatchAllTag = ClMatchAllTag & 0xFF;
    }
  } else if (CompileKernel) {
    MatchAllTag = 0xFF;
  }
  UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();

  // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);

  InstrumentGlobals =
      !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);

  if (!CompileKernel) {
    createHwasanCtorComdat();

    if (InstrumentGlobals)
      instrumentGlobals();

    bool InstrumentPersonalityFunctions =
        optOr(ClInstrumentPersonalityFunctions, NewRuntime);
    if (InstrumentPersonalityFunctions)
      instrumentPersonalityFunctions();
  }

  if (!TargetTriple.isAndroid()) {
    Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    GlobalValue::ExternalLinkage, nullptr,
                                    "__hwasan_tls", nullptr,
                                    GlobalVariable::InitialExecTLSModel);
      appendToCompilerUsed(M, GV);
      return GV;
    });
    ThreadPtrGlobal = cast<GlobalVariable>(C);
  }
}

void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
  FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
      *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
      *HwasanMemsetFnTy;
  if (UseMatchAllCallback) {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
  } else {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
  }

  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
        HwasanMemoryAccessCallbackSizedFnTy);

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
                                    itostr(1ULL << AccessSizeIndex) +
                                    MatchAllStr + EndingStr,
                                HwasanMemoryAccessCallbackFnTy);
    }
  }

  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
          : ClMemoryAccessCallbackPrefix;

  HwasanMemmove = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemcpy = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemset = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);

  HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
                                              PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  HwasanRecordFrameRecordFunc =
      M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);

  ShadowGlobal =
      M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));

  HwasanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
}

Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
  // An empty inline asm with input reg == output reg.
  // An opaque no-op cast, basically.
  // This prevents code bloat as a result of rematerializing trivial definitions
  // such as constants or global addresses at every load and store.
  InlineAsm *Asm =
      InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
                     StringRef(""), StringRef("=r,0"),
                     /*hasSideEffects=*/false);
  return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
}

Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  return getOpaqueNoopCast(IRB, ShadowGlobal);
}

Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
  if (Mapping.Offset != kDynamicShadowSentinel)
    return getOpaqueNoopCast(
        IRB, ConstantExpr::getIntToPtr(
                 ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));

  if (Mapping.InGlobal)
    return getDynamicShadowIfunc(IRB);

  Value *GlobalDynamicAddress =
      IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
          kHwasanShadowMemoryDynamicAddress, PtrTy);
  return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
}

bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  if (findAllocaForValue(Ptr)) {
    if (!InstrumentStack)
      return true;
    if (SSI && SSI->stackAccessIsSafe(*Inst))
      return true;
  }

  if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
    if (!InstrumentGlobals)
      return true;
    // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
  }

  return false;
}

void HWAddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, const TargetLibraryInfo &TLI,
    SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata(LLVMContext::MD_nosanitize))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (ShadowBase == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
      if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
          ignoreAccess(I, CI->getArgOperand(ArgNo)))
        continue;
      Type *Ty = CI->getParamByValType(ArgNo);
      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
    }
  }
}

static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  return -1;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
      TargetTriple.isRISCV64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
  if (Mapping.Offset == 0)
    return IRB.CreateIntToPtr(Shadow, PtrTy);
  // (Mem >> Scale) + Offset
  return IRB.CreatePtrAdd(ShadowBase, Shadow);
}

int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
                                          unsigned AccessSizeIndex) {
  return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
         (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
         (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
         (Recover << HWASanAccessInfo::RecoverShift) |
         (IsWrite << HWASanAccessInfo::IsWriteShift) |
         (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
}
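
// Sketch of how the packed value above is consumed (the *Shift constants come
// from HWASanAccessInfo in the pass header): for example, a 4-byte write with
// recovery disabled contributes (1 << IsWriteShift) | (2 << AccessSizeShift),
// and the outlined check or runtime decodes the same fields back out of the
// immediate.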

HWAddressSanitizer::ShadowTagCheckInfo
HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                         DomTreeUpdater &DTU, LoopInfo *LI) {
  ShadowTagCheckInfo R;

  IRBuilder<> IRB(InsertBefore);

  R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  R.PtrTag =
      IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
  R.AddrLong = untagPointer(IRB, R.PtrLong);
  Value *Shadow = memToShadow(R.AddrLong, IRB);
  R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);

  if (MatchAllTag.has_value()) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  R.TagMismatchTerm = SplitBlockAndInsertIfThen(
      TagMismatch, InsertBefore, false,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  return R;
}

void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore,
                                                    DomTreeUpdater &DTU,
                                                    LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  if (InlineFastPath)
    InsertBefore =
        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;

  IRBuilder<> IRB(InsertBefore);
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  bool useFixedShadowIntrinsic = false;
  // The memaccess fixed shadow intrinsic is only supported on AArch64,
  // which allows a 16-bit immediate to be left-shifted by 32.
  // Since kShadowBaseAlignment == 32, and Linux by default will not
  // mmap above 48-bits, practically any valid shadow offset is
  // representable.
  // In particular, an offset of 4TB (1024 << 32) is representable, and
  // ought to be good enough for anybody.
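  //
  // For example, Offset == 0x40000000000 (4 TB) gives offset_shifted == 0x400,
  // and (0x400 << 32) round-trips back to the original offset, so the fixed
  // shadow variant is usable; an offset that is not a multiple of 1 << 32
  // fails the check below and falls back to passing ShadowBase explicitly.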
  if (TargetTriple.isAArch64() && Mapping.Offset != kDynamicShadowSentinel) {
    uint16_t offset_shifted = Mapping.Offset >> 32;
    useFixedShadowIntrinsic = (uint64_t)offset_shifted << 32 == Mapping.Offset;
  }

  if (useFixedShadowIntrinsic)
    IRB.CreateCall(
        Intrinsic::getDeclaration(
            M, UseShortGranules
                   ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
                   : Intrinsic::hwasan_check_memaccess_fixedshadow),
        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
         ConstantInt::get(Int64Ty, Mapping.Offset)});
  else
    IRB.CreateCall(Intrinsic::getDeclaration(
                       M, UseShortGranules
                              ? Intrinsic::hwasan_check_memaccess_shortgranules
                              : Intrinsic::hwasan_check_memaccess),
                   {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
}

void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore,
                                                   DomTreeUpdater &DTU,
                                                   LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);

  IRBuilder<> IRB(TCI.TagMismatchTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

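  // Short-granule handling, sketched on a concrete case: a shadow byte in
  // [1, 15] means only that many leading bytes of the 16-byte granule are
  // addressable. For a 4-byte access (AccessSizeIndex == 2) whose address has
  // low nibble 0x9, PtrLowBits becomes 0x9 + 3 = 0xc, so the access is only in
  // bounds if 0xc < MemTag; the pointer tag must additionally match the real
  // tag stored in the granule's last byte, which is checked just below.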
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
                            MDBuilder(*C).createUnlikelyBranchWeights(), &DTU,
                            LI, CheckFailTerm->getParent());

  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
                            MDBuilder(*C).createUnlikelyBranchWeights(), &DTU,
                            LI, CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  case Triple::riscv64:
    // The signal handler will find the data address in x10.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "ebreak\naddiw x0, x11, " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x10}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, TCI.PtrLong);
  if (Recover)
    cast<BranchInst>(CheckFailTerm)
        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
}

bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
    return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
           (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
  }
  if (isa<MemSetInst>(MI))
    return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
  return false;
}

void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    SmallVector<Value *, 4> Args{
        MI->getOperand(0), MI->getOperand(1),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};

    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
  } else if (isa<MemSetInst>(MI)) {
    SmallVector<Value *, 4> Args{
        MI->getOperand(0),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemset, Args);
  }
  MI->eraseFromParent();
}

bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
                                             DomTreeUpdater &DTU,
                                             LoopInfo *LI) {
  Value *Addr = O.getPtr();

  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");

  if (O.MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(O.getInsn());
  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
       *O.Alignment >= O.TypeStoreSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
    if (InstrumentWithCalls) {
      SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
      if (UseMatchAllCallback)
        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                     Args);
    } else if (OutlinedChecks) {
      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                 DTU, LI);
    } else {
      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                DTU, LI);
    }
  } else {
    SmallVector<Value *, 3> Args{
        IRB.CreatePointerCast(Addr, IntptrTy),
        IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
                       ConstantInt::get(IntptrTy, 8))};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
  }
  untagPointerOperand(O.getInsn(), Addr);

  return true;
}

void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
                                   size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  if (!UseShortGranules)
    Size = AlignedSize;

  Tag = IRB.CreateTrunc(Tag, Int8Ty);
  if (InstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, PtrTy), Tag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
    Value *ShadowPtr = memToShadow(AddrLong, IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
    if (Size != AlignedSize) {
      const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
      IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
                      IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(
          Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
                                      AlignedSize - 1));
    }
  }
}

unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
  if (TargetTriple.getArch() == Triple::x86_64)
    return AllocaNo & TagMaskByte;

  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
  // masks.
  // The list does not include the value 255, which is used for UAR.
  //
  // Because we are more likely to use earlier elements of this list than later
  // ones, it is sorted in increasing order of probability of collision with a
  // mask allocated (temporally) nearby. The program that generated this list
  // can be found at:
  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
  static const unsigned FastMasks[] = {
      0,   128, 64,  192, 32,  96,  224, 112, 240, 48,  16,  120,
      248, 56,  24,  8,   124, 252, 60,  28,  12,  4,   126, 254,
      62,  30,  14,  6,   2,   127, 63,  31,  15,  7,   3,   1};
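  // For example, AllocaNo 5 selects FastMasks[5] == 96 (0b01100000), a single
  // run of set bits, so the "x ^ (mask << 56)" retag described above can be
  // encoded as one AArch64 EOR with a logical immediate.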
  return FastMasks[AllocaNo % std::size(FastMasks)];
}

Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
  if (TagMaskByte == 0xFF)
    return OldTag; // No need to clear the tag byte.
  return IRB.CreateAnd(OldTag,
                       ConstantInt::get(OldTag->getType(), TagMaskByte));
}

Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}

Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return nullptr;
  if (StackBaseTag)
    return StackBaseTag;
  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *FramePointerLong = getCachedFP(IRB);
  Value *StackTag =
      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
                                      IRB.CreateLShr(FramePointerLong, 20)));
  StackTag->setName("hwasan.stack.base.tag");
  return StackTag;
}

Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(
      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
}

Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
  Value *FramePointerLong = getCachedFP(IRB);
  Value *UARTag =
      applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));

  UARTag->setName("hwasan.uar.tag");
  return UARTag;
}

// Add a tag to an address.
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
                                      Value *PtrLong, Value *Tag) {
  assert(!UsePageAliases);
  Value *TaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    Value *ShiftedTag =
        IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
                     ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
    TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
  } else {
    // Userspace can simply do OR (tag << PointerTagShift);
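    // For example, with PointerTagShift == 56, tag 0x2a applied to
    // 0x00007fff12345678 gives 0x2a007fff12345678; top-byte-ignore-style
    // hardware masks the tag off again on dereference.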
    Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
    TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
  }
  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}

// Remove tag from an address.
Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
  assert(!UsePageAliases);
  Value *UntaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    UntaggedPtrLong =
        IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
                                               TagMaskByte << PointerTagShift));
  } else {
    // Userspace addresses have 0x00.
    UntaggedPtrLong = IRB.CreateAnd(
        PtrLong, ConstantInt::get(PtrLong->getType(),
                                  ~(TagMaskByte << PointerTagShift)));
  }
  return UntaggedPtrLong;
}

Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
  // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
  // in Bionic's libc/platform/bionic/tls_defines.h.
  constexpr int SanitizerSlot = 6;
  if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
    return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
  return ThreadPtrGlobal;
}

Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
  if (!CachedFP)
    CachedFP = memtag::getFP(IRB);
  return CachedFP;
}

Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
  // Prepare ring buffer data.
  Value *PC = memtag::getPC(TargetTriple, IRB);
  Value *FP = getCachedFP(IRB);

  // Mix FP and PC.
  // Assumptions:
  //   PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
  //   FP is 0xfffffffffffFFFF0  (4 lower bits are zero)
  // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
  //   0xFFFFPPPPPPPPPPPP
  //
  // FP works because in AArch64FrameLowering::getFrameIndexReference, we
  // prefer FP-relative offsets for functions compiled with HWASan.
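  //
  // Worked example: PC == 0x0000aaaaaaaaaaaa and FP == 0x0000ffffffff1230
  // give (FP << 44) == 0xf123000000000000, so the packed record is
  // 0xf123aaaaaaaaaaaa -- the low 20 FP bits in the top bits and the PC in
  // the low 48 bits.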
  FP = IRB.CreateShl(FP, 44);
  return IRB.CreateOr(PC, FP);
}

void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.InTls)
    ShadowBase = getShadowNonTls(IRB);
  else if (!WithFrameRecord && TargetTriple.isAndroid())
    ShadowBase = getDynamicShadowIfunc(IRB);

  if (!WithFrameRecord && ShadowBase)
    return;

  Value *SlotPtr = nullptr;
  Value *ThreadLong = nullptr;
  Value *ThreadLongMaybeUntagged = nullptr;

  auto getThreadLongMaybeUntagged = [&]() {
    if (!SlotPtr)
      SlotPtr = getHwasanThreadSlotPtr(IRB);
    if (!ThreadLong)
      ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
    // Extract the address field from ThreadLong. Unnecessary on AArch64 with
    // TBI.
    return TargetTriple.isAArch64() ? ThreadLong
                                    : untagPointer(IRB, ThreadLong);
  };

  if (WithFrameRecord) {
    switch (ClRecordStackHistory) {
    case libcall: {
      // Emit a runtime call into hwasan rather than emitting instructions for
      // recording stack history.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
      break;
    }
    case instr: {
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

      // Store data to ring buffer.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      Value *RecordPtr =
          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
      IRB.CreateStore(FrameRecordInfo, RecordPtr);

      // Update the ring buffer. Top byte of ThreadLong defines the size of the
      // buffer in pages, it must be a power of two, and the start of the buffer
      // must be aligned by twice that much. Therefore wrap around of the ring
      // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
      // The use of AShr instead of LShr is due to
      //   https://bugs.llvm.org/show_bug.cgi?id=39030
      // Runtime library makes sure not to use the highest bit.
      //
      // Mechanical proof of this address calculation can be found at:
      // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/prove_hwasanwrap.smt2
      //
      // Example of the wrap case for N = 1
      // Pointer:   0x01AAAAAAAAAAAFF8
      //                     +
      //            0x0000000000000008
      //                     =
      //            0x01AAAAAAAAAAB000
      //                     &
      // WrapMask:  0xFFFFFFFFFFFFEFFF
      //                     =
      //            0x01AAAAAAAAAAA000
      //
      // Then the WrapMask will be a no-op until the next wrap case.
      Value *WrapMask = IRB.CreateXor(
          IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
          ConstantInt::get(IntptrTy, (uint64_t)-1));
      Value *ThreadLongNew = IRB.CreateAnd(
          IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
      IRB.CreateStore(ThreadLongNew, SlotPtr);
      break;
    }
    case none: {
      llvm_unreachable(
          "A stack history recording mode should've been selected.");
    }
    }
  }

  if (!ShadowBase) {
    if (!ThreadLongMaybeUntagged)
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

    // Get shadow base address by aligning RecordPtr up.
    // Note: this is not correct if the pointer is already aligned.
    // Runtime library will make sure this never happens.
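    //
    // Sketch with kShadowBaseAlignment == 32: OR-ing with 0xffffffff and then
    // adding 1 rounds ThreadLongMaybeUntagged up to the next multiple of
    // 1 << 32, e.g. 0x0000007123456789 -> 0x0000007200000000, which the
    // runtime arranges to be the shadow base (see the ShadowMapping comment).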
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(
            ThreadLongMaybeUntagged,
            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
  }
}

bool HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNonDebugInstruction());
    IRB.CreateCall(
        HwasanHandleVfork,
        {memtag::readRegister(
            IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
  }
  return true;
}

bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNonDebugInstruction());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    Value *AICast = IRB.CreatePointerCast(AI, PtrTy);

    auto HandleLifetime = [&](IntrinsicInst *II) {
      // Set the lifetime intrinsic to cover the whole alloca. This reduces the
      // set of assumptions we need to make about the lifetime. Without this we
      // would need to ensure that we can track the lifetime pointer to a
      // constant offset from the alloca, and would still need to change the
      // size to include the extra alignment we use for the untagging to make
      // the size consistent.
      //
      // The check for standard lifetime below makes sure that we have exactly
      // one set of start / end in any execution (i.e. the ends are not
      // reachable from each other), so this will not cause any problems.
      II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
      II->setArgOperand(1, AICast);
    };
    llvm::for_each(Info.LifetimeStart, HandleLifetime);
    llvm::for_each(Info.LifetimeEnd, HandleLifetime);

    AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && User != AICast &&
             !memtag::isLifetimeIntrinsic(User);
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    bool StandardLifetime =
        !SInfo.CallsReturnTwice &&
        SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
                                   &LI, ClMaxLifetimes);
    if (DetectUseAfterScope && StandardLifetime) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      IRB.SetInsertPoint(Start->getNextNode());
      tagAlloca(IRB, AI, Tag, Size);
      if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      tagAlloca(IRB, AI, Tag, Size);
      for (auto *RI : SInfo.RetVec)
        TagEnd(RI);
      // We inserted tagging outside of the lifetimes, so we have to remove
      // them.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
  for (auto &I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();
  return true;
}

static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE,
                       bool Skip) {
  if (Skip) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
             << "Skipped: F=" << ore::NV("Function", &F);
    });
  } else {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
             << "Sanitized: F=" << ore::NV("Function", &F);
    });
  }
}

bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
    Function &F, FunctionAnalysisManager &FAM) const {
  bool Skip = [&]() {
    if (ClRandomSkipRate.getNumOccurrences()) {
      std::bernoulli_distribution D(ClRandomSkipRate);
      return !D(*Rng);
    }
    if (!ClHotPercentileCutoff.getNumOccurrences())
      return false;
    auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
    ProfileSummaryInfo *PSI =
        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
    if (!PSI || !PSI->hasProfileSummary()) {
      ++NumNoProfileSummaryFuncs;
      return false;
    }
    return PSI->isFunctionHotInCallGraphNthPercentile(
        ClHotPercentileCutoff, &F, FAM.getResult<BlockFrequencyAnalysis>(F));
  }();
  emitRemark(F, FAM.getResult<OptimizationRemarkEmitterAnalysis>(F), Skip);
  return Skip;
}

1531void HWAddressSanitizer::sanitizeFunction(Function &F,
1533 if (&F == HwasanCtorFunction)
1534 return;
1535
1536 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1537 return;
1538
1539 if (F.empty())
1540 return;
1541
1542 NumTotalFuncs++;
1543
1544 if (selectiveInstrumentationShouldSkip(F, FAM))
1545 return;
1546
1547 NumInstrumentedFuncs++;
1548
1549 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1550
1551 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1552 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1553 SmallVector<Instruction *, 8> LandingPadVec;
1555
1556 memtag::StackInfoBuilder SIB(SSI);
1557 for (auto &Inst : instructions(F)) {
1558 if (InstrumentStack) {
1559 SIB.visit(Inst);
1560 }
1561
1562 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1563 LandingPadVec.push_back(&Inst);
1564
1565 getInterestingMemoryOperands(&Inst, TLI, OperandsToInstrument);
1566
1567 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1568 if (!ignoreMemIntrinsic(MI))
1569 IntrinToInstrument.push_back(MI);
1570 }
1571
1572 memtag::StackInfo &SInfo = SIB.get();
1573
1574 initializeCallbacks(*F.getParent());
1575
1576 if (!LandingPadVec.empty())
1577 instrumentLandingPads(LandingPadVec);
1578
1579 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1580 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1581 // __hwasan_personality_thunk is a no-op for functions without an
1582 // instrumented stack, so we can drop it.
1583 F.setPersonalityFn(nullptr);
1584 }
1585
1586 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1587 IntrinToInstrument.empty())
1588 return;
1589
1590 assert(!ShadowBase);
1591
1592 BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1593 IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1594 emitPrologue(EntryIRB,
1595 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1596 Mapping.WithFrameRecord &&
1597 !SInfo.AllocasToInstrument.empty());
1598
1599 if (!SInfo.AllocasToInstrument.empty()) {
1600 const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
1601 const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
1602 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1603 Value *StackTag = getStackBaseTag(EntryIRB);
1604 Value *UARTag = getUARTag(EntryIRB);
1605 instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1606 }
1607
1608 // If we split the entry block, move any allocas that were originally in the
1609 // entry block back into the entry block so that they aren't treated as
1610 // dynamic allocas.
1611 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1612 InsertPt = F.getEntryBlock().begin();
1613 for (Instruction &I :
1614 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1615 if (auto *AI = dyn_cast<AllocaInst>(&I))
1616 if (isa<ConstantInt>(AI->getArraySize()))
1617 I.moveBefore(F.getEntryBlock(), InsertPt);
1618 }
1619 }
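// Only allocas with a constant array size are moved: a variable-sized alloca
// is genuinely dynamic and must stay where it was created.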
1620
1621 DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
1622 PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
1623 LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
1624 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1625 for (auto &Operand : OperandsToInstrument)
1626 instrumentMemAccess(Operand, DTU, LI);
1627 DTU.flush();
1628
1629 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1630 for (auto *Inst : IntrinToInstrument)
1631 instrumentMemIntrinsic(Inst);
1632 }
1633
1634 ShadowBase = nullptr;
1635 StackBaseTag = nullptr;
1636 CachedFP = nullptr;
1637}
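// Per-function instrumentation in summary: collect interesting memory
// operands, mem intrinsics, allocas and landing pads; emit the prologue
// (shadow base and, when stack allocas are tagged, a frame record); tag the
// stack slots; then rewrite each memory access and intrinsic. ShadowBase,
// StackBaseTag and CachedFP are per-function caches and are reset at the end
// so the next function starts clean.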
1638
1639void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1640 assert(!UsePageAliases);
1641 Constant *Initializer = GV->getInitializer();
1642 uint64_t SizeInBytes =
1643 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1644 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1645 if (SizeInBytes != NewSize) {
1646 // Pad the initializer out to the next multiple of 16 bytes and add the
1647 // required short granule tag.
1648 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1649 Init.back() = Tag;
1650 Constant *Padding = ConstantDataArray::get(*C, Init);
1651 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1652 }
1653
1654 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1655 GlobalValue::ExternalLinkage, Initializer,
1656 GV->getName() + ".hwasan");
1657 NewGV->copyAttributesFrom(GV);
1658 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1659 NewGV->copyMetadata(GV, 0);
1660 NewGV->setAlignment(
1661 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1662
1663 // It is invalid to ICF two globals that have different tags. In the case
1664 // where the size of the global is a multiple of the tag granularity the
1665 // contents of the globals may be the same but the tags (i.e. symbol values)
1666 // may be different, and the symbols are not considered during ICF. In the
1667 // case where the size is not a multiple of the granularity, the short granule
1668 // tags would discriminate two globals with different tags, but there would
1669 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1670 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1671 // granule tag in the last byte.
1672 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1673
1674 // Descriptor format (assuming little-endian):
1675 // bytes 0-3: relative address of global
1676 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1677 // it isn't, we create multiple descriptors)
1678 // byte 7: tag
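// Worked example (hypothetical values): a 20-byte global tagged 0x21 gets a
// single descriptor whose second word is 0x21000014 (Size = 0x14 in the low
// 24 bits, Tag = 0x21 in the top byte); a 20 MiB global needs two
// descriptors because each one covers at most 0xfffff0 bytes.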
1679 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1680 const uint64_t MaxDescriptorSize = 0xfffff0;
1681 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1682 DescriptorPos += MaxDescriptorSize) {
1683 auto *Descriptor =
1684 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1685 nullptr, GV->getName() + ".hwasan.descriptor");
1686 auto *GVRelPtr = ConstantExpr::getTrunc(
1687 ConstantExpr::getAdd(
1688 ConstantExpr::getSub(
1689 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1690 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1691 ConstantInt::get(Int64Ty, DescriptorPos)),
1692 Int32Ty);
1693 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1694 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1695 Descriptor->setComdat(NewGV->getComdat());
1696 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1697 Descriptor->setSection("hwasan_globals");
1698 Descriptor->setMetadata(LLVMContext::MD_associated,
1699 MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1700 appendToCompilerUsed(M, Descriptor);
1701 }
1702
1703 Constant *Aliasee = ConstantExpr::getIntToPtr(
1704 ConstantExpr::getAdd(
1705 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1706 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1707 GV->getType());
1708 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1709 GV->getLinkage(), "", Aliasee, &M);
1710 Alias->setVisibility(GV->getVisibility());
1711 Alias->takeName(GV);
1712 GV->replaceAllUsesWith(Alias);
1713 GV->eraseFromParent();
1714}
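// Net effect (sketch): the original symbol name becomes an alias whose value
// is the replacement global's address with the tag folded into the pointer's
// tag bits via PointerTagShift, so ordinary references yield already-tagged
// pointers, while the descriptors emitted into the hwasan_globals section
// let the runtime tag the corresponding shadow memory when the binary is
// loaded.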
1715
1716void HWAddressSanitizer::instrumentGlobals() {
1717 std::vector<GlobalVariable *> Globals;
1718 for (GlobalVariable &GV : M.globals()) {
1719 if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
1720 continue;
1721
1722 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1723 GV.isThreadLocal())
1724 continue;
1725
1726 // Common symbols can't have aliases point to them, so they can't be tagged.
1727 if (GV.hasCommonLinkage())
1728 continue;
1729
1730 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1731 // which would be broken both by adding tags and potentially by the extra
1732 // padding/alignment that we insert.
1733 if (GV.hasSection())
1734 continue;
1735
1736 Globals.push_back(&GV);
1737 }
1738
1739 MD5 Hasher;
1740 Hasher.update(M.getSourceFileName());
1741 MD5::MD5Result Hash;
1742 Hasher.final(Hash);
1743 uint8_t Tag = Hash[0];
1744
1745 assert(TagMaskByte >= 16);
1746
1747 for (GlobalVariable *GV : Globals) {
1748 // Don't allow globals to be tagged with something that looks like a
1749 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1750 // the fast path shadow-vs-address check succeeds.
1751 if (Tag < 16 || Tag > TagMaskByte)
1752 Tag = 16;
1753 instrumentGlobal(GV, Tag++);
1754 }
1755}
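// Tag assignment sketch: the starting tag is derived from an MD5 hash of the
// module's source file name, so different translation units tend to start at
// different values, and tags then increment per global while staying inside
// [16, TagMaskByte]. For example (hypothetical hash), if Hash[0] were 0x07
// the first global would be re-tagged 16, the next 17, and so on, keeping
// global tags out of the 0-15 short-granule range.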
1756
1757void HWAddressSanitizer::instrumentPersonalityFunctions() {
1758 // We need to untag stack frames as we unwind past them. That is the job of
1759 // the personality function wrapper, which either wraps an existing
1760 // personality function or acts as a personality function on its own. Each
1761 // function that has a personality function or that can be unwound past has
1762 // its personality function changed to a thunk that calls the personality
1763 // function wrapper in the runtime.
1764 MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1765 for (Function &F : M) {
1766 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1767 continue;
1768
1769 if (F.hasPersonalityFn()) {
1770 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1771 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1772 PersonalityFns[nullptr].push_back(&F);
1773 }
1774 }
1775
1776 if (PersonalityFns.empty())
1777 return;
1778
1779 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1780 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1781 PtrTy, PtrTy, PtrTy, PtrTy);
1782 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1783 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1784
1785 for (auto &P : PersonalityFns) {
1786 std::string ThunkName = kHwasanPersonalityThunkName;
1787 if (P.first)
1788 ThunkName += ("." + P.first->getName()).str();
1789 FunctionType *ThunkFnTy = FunctionType::get(
1790 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1791 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1792 cast<GlobalValue>(P.first)->hasLocalLinkage());
1793 auto *ThunkFn = Function::Create(ThunkFnTy,
1794 IsLocal ? GlobalValue::InternalLinkage
1795 : GlobalValue::LinkOnceODRLinkage,
1796 ThunkName, &M);
1797 if (!IsLocal) {
1798 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1799 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1800 }
1801
1802 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1803 IRBuilder<> IRB(BB);
1804 CallInst *WrapperCall = IRB.CreateCall(
1805 HwasanPersonalityWrapper,
1806 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1807 ThunkFn->getArg(3), ThunkFn->getArg(4),
1808 P.first ? P.first : Constant::getNullValue(PtrTy),
1809 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1810 WrapperCall->setTailCall();
1811 IRB.CreateRet(WrapperCall);
1812
1813 for (Function *F : P.second)
1814 F->setPersonalityFn(ThunkFn);
1815 }
1816}
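// Thunk layout sketch: each distinct personality routine P gets one
// __hwasan_personality_thunk[.P] that tail-calls __hwasan_personality_wrapper
// with the thunk's own arguments, the original personality (or null) and the
// addresses of _Unwind_GetGR and _Unwind_GetCFA, so the runtime can untag
// stack memory as frames are unwound; thunks for external personalities are
// emitted linkonce_odr in a comdat so duplicates across translation units
// merge.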
1817
1818void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1819 bool InstrumentWithCalls) {
1820 Scale = kDefaultShadowScale;
1821 if (TargetTriple.isOSFuchsia()) {
1822 // Fuchsia is always PIE, which means that the beginning of the address
1823 // space is always available.
1824 InGlobal = false;
1825 InTls = false;
1826 Offset = 0;
1827 WithFrameRecord = true;
1828 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1829 InGlobal = false;
1830 InTls = false;
1831 Offset = ClMappingOffset;
1832 WithFrameRecord = false;
1833 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1834 InGlobal = false;
1835 InTls = false;
1836 Offset = 0;
1837 WithFrameRecord = false;
1838 } else if (ClWithIfunc) {
1839 InGlobal = true;
1840 InTls = false;
1841 Offset = kDynamicShadowSentinel;
1842 WithFrameRecord = false;
1843 } else if (ClWithTls) {
1844 InGlobal = false;
1845 InTls = true;
1846 Offset = kDynamicShadowSentinel;
1847 WithFrameRecord = true;
1848 } else {
1849 InGlobal = false;
1850 InTls = false;
1851 Offset = kDynamicShadowSentinel;
1852 WithFrameRecord = false;
1853 }
1854}
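// Mapping selection summary (sketch derived from the branches above):
//   Fuchsia:                    Offset = 0, frame records on
//   -hwasan-mapping-offset=N:   fixed Offset N, frame records off
//   KHWASan / callback mode:    Offset = 0, frame records off
//   -hwasan-with-ifunc:         shadow base found via an ifunc global
//   -hwasan-with-tls (default): shadow base in a TLS slot, frame records on
//   otherwise:                  dynamic shadow base (kDynamicShadowSentinel)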