1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/Dominators.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/Instruction.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/LLVMContext.h"
49#include "llvm/IR/MDBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
55#include "llvm/Support/Debug.h"
65#include <optional>
66#include <random>
67
68using namespace llvm;
69
70#define DEBUG_TYPE "hwasan"
71
72const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
73const char kHwasanNoteName[] = "hwasan.note";
74const char kHwasanInitName[] = "__hwasan_init";
75const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
76
78 "__hwasan_shadow_memory_dynamic_address";
79
80// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
81static const size_t kNumberOfAccessSizes = 5;
82
83 static const size_t kDefaultShadowScale = 4;
84 static const uint64_t kDynamicShadowSentinel =
85 std::numeric_limits<uint64_t>::max();
86
87static const unsigned kShadowBaseAlignment = 32;
88
89 static cl::opt<std::string>
90 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
91 cl::desc("Prefix for memory access callbacks"),
92 cl::Hidden, cl::init("__hwasan_"));
93
94 static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
95 "hwasan-kernel-mem-intrinsic-prefix",
96 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
97 cl::init(false));
98
99 static cl::opt<bool> ClInstrumentWithCalls(
100 "hwasan-instrument-with-calls",
101 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
102 cl::init(false));
103
104static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
105 cl::desc("instrument read instructions"),
106 cl::Hidden, cl::init(true));
107
108static cl::opt<bool>
109 ClInstrumentWrites("hwasan-instrument-writes",
110 cl::desc("instrument write instructions"), cl::Hidden,
111 cl::init(true));
112
113 static cl::opt<bool> ClInstrumentAtomics(
114 "hwasan-instrument-atomics",
115 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
116 cl::init(true));
117
118static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
119 cl::desc("instrument byval arguments"),
120 cl::Hidden, cl::init(true));
121
122static cl::opt<bool>
123 ClRecover("hwasan-recover",
124 cl::desc("Enable recovery mode (continue-after-error)."),
125 cl::Hidden, cl::init(false));
126
127static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
128 cl::desc("instrument stack (allocas)"),
129 cl::Hidden, cl::init(true));
130
131static cl::opt<bool>
132 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
133 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
134 cl::Optional);
135
136 static cl::opt<size_t> ClMaxLifetimes(
137 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
138 cl::ReallyHidden,
139 cl::desc("How many lifetime ends to handle for a single alloca."),
140 cl::Optional);
141
142static cl::opt<bool>
143 ClUseAfterScope("hwasan-use-after-scope",
144 cl::desc("detect use after scope within function"),
145 cl::Hidden, cl::init(true));
146
147 static cl::opt<bool> ClGenerateTagsWithCalls(
148 "hwasan-generate-tags-with-calls",
149 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
150 cl::init(false));
151
152static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
153 cl::Hidden, cl::init(false));
154
155 static cl::opt<int> ClMatchAllTag(
156 "hwasan-match-all-tag",
157 cl::desc("don't report bad accesses via pointers with this tag"),
158 cl::Hidden, cl::init(-1));
159
160static cl::opt<bool>
161 ClEnableKhwasan("hwasan-kernel",
162 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
163 cl::Hidden, cl::init(false));
164
165 // These flags allow changing the shadow mapping and control how shadow memory
166// is accessed. The shadow mapping looks like:
167// Shadow = (Mem >> scale) + offset
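// For example, with the default scale of 4 each shadow byte covers one
// 16-byte granule, so an access to address 0x1000 consults the shadow byte at
// offset + (0x1000 >> 4) = offset + 0x100 (addresses here are illustrative).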
168
169 static cl::opt<uint64_t>
170 ClMappingOffset("hwasan-mapping-offset",
171 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
172 cl::Hidden, cl::init(0));
173
174static cl::opt<bool>
175 ClWithIfunc("hwasan-with-ifunc",
176 cl::desc("Access dynamic shadow through an ifunc global on "
177 "platforms that support this"),
178 cl::Hidden, cl::init(false));
179
180 static cl::opt<bool> ClWithTls(
181 "hwasan-with-tls",
182 cl::desc("Access dynamic shadow through an thread-local pointer on "
183 "platforms that support this"),
184 cl::Hidden, cl::init(true));
185
186static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
187 cl::desc("Hot percentile cutoff."));
188
189static cl::opt<float>
190 ClRandomSkipRate("hwasan-random-rate",
191 cl::desc("Probability value in the range [0.0, 1.0] "
192 "to keep instrumentation of a function."));
193
194STATISTIC(NumTotalFuncs, "Number of total funcs");
195STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
196STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
197
198// Mode for selecting how to insert frame record info into the stack ring
199// buffer.
200 enum RecordStackHistoryMode {
201 // Do not record frame record info.
202 none,
203
204 // Insert instructions into the prologue for storing into the stack ring
205 // buffer directly.
206 instr,
207
208 // Add a call to __hwasan_add_frame_record in the runtime.
209 libcall,
210};
211
212 static cl::opt<RecordStackHistoryMode> ClRecordStackHistory(
213 "hwasan-record-stack-history",
214 cl::desc("Record stack frames with tagged allocations in a thread-local "
215 "ring buffer"),
216 cl::values(clEnumVal(none, "Do not record stack ring history"),
217 clEnumVal(instr, "Insert instructions into the prologue for "
218 "storing into the stack ring buffer directly"),
219 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
220 "storing into the stack ring buffer")),
221 cl::Hidden, cl::init(instr));
222
223static cl::opt<bool>
224 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
225 cl::desc("instrument memory intrinsics"),
226 cl::Hidden, cl::init(true));
227
228static cl::opt<bool>
229 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
230 cl::desc("instrument landing pads"), cl::Hidden,
231 cl::init(false));
232
233 static cl::opt<bool> ClUseShortGranules(
234 "hwasan-use-short-granules",
235 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
236 cl::init(false));
237
238 static cl::opt<bool> ClInstrumentPersonalityFunctions(
239 "hwasan-instrument-personality-functions",
240 cl::desc("instrument personality functions"), cl::Hidden);
241
242static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
243 cl::desc("inline all checks"),
244 cl::Hidden, cl::init(false));
245
246static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
247 cl::desc("inline fast path checks"),
248 cl::Hidden, cl::init(false));
249
250// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
251static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
252 cl::desc("Use page aliasing in HWASan"),
253 cl::Hidden, cl::init(false));
254
255namespace {
256
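// Helper that prefers an explicitly passed command-line flag over the
// caller-supplied default, e.g. optOr(ClRecover, Recover) yields the value of
// -hwasan-recover when the flag was given and the pass parameter otherwise.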
257template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
258 return Opt.getNumOccurrences() ? Opt : Other;
259}
260
261bool shouldUsePageAliases(const Triple &TargetTriple) {
262 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
263}
264
265bool shouldInstrumentStack(const Triple &TargetTriple) {
266 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
267}
268
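// Note that x86_64 defaults to callback-based checks; the outlined check
// intrinsics are only used on AArch64/RISCV64 ELF targets (see OutlinedChecks
// in initializeModule below).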
269bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
270 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
271}
272
273bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
274 return optOr(ClUseStackSafety, !DisableOptimization);
275}
276
277bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
278 bool DisableOptimization) {
279 return shouldInstrumentStack(TargetTriple) &&
280 mightUseStackSafetyAnalysis(DisableOptimization);
281}
282
283bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
284 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
285}
286
287/// An instrumentation pass implementing detection of addressability bugs
288/// using tagged pointers.
289class HWAddressSanitizer {
290public:
291 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
292 const StackSafetyGlobalInfo *SSI)
293 : M(M), SSI(SSI) {
294 this->Recover = optOr(ClRecover, Recover);
295 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
296 this->Rng = ClRandomSkipRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
297 : nullptr;
298
299 initializeModule();
300 }
301
302 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
303
304private:
305 struct ShadowTagCheckInfo {
306 Instruction *TagMismatchTerm = nullptr;
307 Value *PtrLong = nullptr;
308 Value *AddrLong = nullptr;
309 Value *PtrTag = nullptr;
310 Value *MemTag = nullptr;
311 };
312
313 bool selectiveInstrumentationShouldSkip(Function &F,
315 void initializeModule();
316 void createHwasanCtorComdat();
317
318 void initializeCallbacks(Module &M);
319
320 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
321
322 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
323 Value *getShadowNonTls(IRBuilder<> &IRB);
324
325 void untagPointerOperand(Instruction *I, Value *Addr);
326 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
327
328 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
329 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
330 DomTreeUpdater &DTU, LoopInfo *LI);
331 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
332 unsigned AccessSizeIndex,
333 Instruction *InsertBefore,
334 DomTreeUpdater &DTU, LoopInfo *LI);
335 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
336 unsigned AccessSizeIndex,
337 Instruction *InsertBefore, DomTreeUpdater &DTU,
338 LoopInfo *LI);
339 bool ignoreMemIntrinsic(MemIntrinsic *MI);
340 void instrumentMemIntrinsic(MemIntrinsic *MI);
341 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
342 LoopInfo *LI);
343 bool ignoreAccess(Instruction *Inst, Value *Ptr);
344 void getInterestingMemoryOperands(
345 Instruction *I, const TargetLibraryInfo &TLI,
346 SmallVectorImpl<InterestingMemoryOperand> &Interesting);
347
348 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
349 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
350 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
351 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
352 const DominatorTree &DT, const PostDominatorTree &PDT,
353 const LoopInfo &LI);
354 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
355 Value *getNextTagWithCall(IRBuilder<> &IRB);
356 Value *getStackBaseTag(IRBuilder<> &IRB);
357 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
358 Value *getUARTag(IRBuilder<> &IRB);
359
360 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
361 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
362 unsigned retagMask(unsigned AllocaNo);
363
364 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
365
366 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
367 void instrumentGlobals();
368
369 Value *getCachedFP(IRBuilder<> &IRB);
370 Value *getFrameRecordInfo(IRBuilder<> &IRB);
371
372 void instrumentPersonalityFunctions();
373
374 LLVMContext *C;
375 Module &M;
376 const StackSafetyGlobalInfo *SSI;
377 Triple TargetTriple;
378 std::unique_ptr<RandomNumberGenerator> Rng;
379
380 /// This struct defines the shadow mapping using the rule:
381 /// shadow = (mem >> Scale) + Offset.
382 /// If InGlobal is true, then
383 /// extern char __hwasan_shadow[];
384 /// shadow = (mem >> Scale) + &__hwasan_shadow
385 /// If InTls is true, then
386 /// extern char *__hwasan_tls;
387 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
388 ///
389 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
390 /// ring buffer for storing stack allocations on targets that support it.
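/// With the default Scale of 4, getObjectAlignment() below is 16, i.e. one
/// shadow byte describes one 16-byte granule.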
391 struct ShadowMapping {
392 uint8_t Scale;
393 uint64_t Offset;
394 bool InGlobal;
395 bool InTls;
396 bool WithFrameRecord;
397
398 void init(Triple &TargetTriple, bool InstrumentWithCalls);
399 Align getObjectAlignment() const { return Align(1ULL << Scale); }
400 };
401
402 ShadowMapping Mapping;
403
404 Type *VoidTy = Type::getVoidTy(M.getContext());
405 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
406 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
407 Type *Int8Ty = Type::getInt8Ty(M.getContext());
408 Type *Int32Ty = Type::getInt32Ty(M.getContext());
409 Type *Int64Ty = Type::getInt64Ty(M.getContext());
410
411 bool CompileKernel;
412 bool Recover;
413 bool OutlinedChecks;
414 bool InlineFastPath;
415 bool UseShortGranules;
416 bool InstrumentLandingPads;
417 bool InstrumentWithCalls;
418 bool InstrumentStack;
419 bool InstrumentGlobals;
420 bool DetectUseAfterScope;
421 bool UsePageAliases;
422 bool UseMatchAllCallback;
423
424 std::optional<uint8_t> MatchAllTag;
425
426 unsigned PointerTagShift;
427 uint64_t TagMaskByte;
428
429 Function *HwasanCtorFunction;
430
431 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
432 FunctionCallee HwasanMemoryAccessCallbackSized[2];
433
434 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
435 FunctionCallee HwasanHandleVfork;
436
437 FunctionCallee HwasanTagMemoryFunc;
438 FunctionCallee HwasanGenerateTagFunc;
439 FunctionCallee HwasanRecordFrameRecordFunc;
440
441 Constant *ShadowGlobal;
442
443 Value *ShadowBase = nullptr;
444 Value *StackBaseTag = nullptr;
445 Value *CachedFP = nullptr;
446 GlobalValue *ThreadPtrGlobal = nullptr;
447};
448
449} // end anonymous namespace
450
451 PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
452 ModuleAnalysisManager &MAM) {
453 const StackSafetyGlobalInfo *SSI = nullptr;
454 auto TargetTriple = llvm::Triple(M.getTargetTriple());
455 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
456 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
457
458 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
459 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
460 for (Function &F : M)
461 HWASan.sanitizeFunction(F, FAM);
462
463 PreservedAnalyses PA = PreservedAnalyses::none();
464 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
465 // are incrementally updated throughout this pass whenever
466 // SplitBlockAndInsertIfThen is called.
467 PA.preserve<DominatorTreeAnalysis>();
468 PA.preserve<PostDominatorTreeAnalysis>();
469 PA.preserve<LoopAnalysis>();
470 // GlobalsAA is considered stateless and does not get invalidated unless
471 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
472 // make changes that require GlobalsAA to be invalidated.
473 PA.abandon<GlobalsAA>();
474 return PA;
475}
476 void HWAddressSanitizerPass::printPipeline(
477 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
478 static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
479 OS, MapClassName2PassName);
480 OS << '<';
481 if (Options.CompileKernel)
482 OS << "kernel;";
483 if (Options.Recover)
484 OS << "recover";
485 OS << '>';
486}
487
488void HWAddressSanitizer::createHwasanCtorComdat() {
489 std::tie(HwasanCtorFunction, std::ignore) =
490 getOrCreateSanitizerCtorAndInitFunctions(
491 M, kHwasanModuleCtorName, kHwasanInitName,
492 /*InitArgTypes=*/{},
493 /*InitArgs=*/{},
494 // This callback is invoked when the functions are created the first
495 // time. Hook them into the global ctors list in that case:
496 [&](Function *Ctor, FunctionCallee) {
497 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
498 Ctor->setComdat(CtorComdat);
499 appendToGlobalCtors(M, Ctor, 0, Ctor);
500 });
501
502 // Create a note that contains pointers to the list of global
503 // descriptors. Adding a note to the output file will cause the linker to
504 // create a PT_NOTE program header pointing to the note that we can use to
505 // find the descriptor list starting from the program headers. A function
506 // provided by the runtime initializes the shadow memory for the globals by
507 // accessing the descriptor list via the note. The dynamic loader needs to
508 // call this function whenever a library is loaded.
509 //
510 // The reason why we use a note for this instead of a more conventional
511 // approach of having a global constructor pass a descriptor list pointer to
512 // the runtime is because of an order of initialization problem. With
513 // constructors we can encounter the following problematic scenario:
514 //
515 // 1) library A depends on library B and also interposes one of B's symbols
516 // 2) B's constructors are called before A's (as required for correctness)
517 // 3) during construction, B accesses one of its "own" globals (actually
518 // interposed by A) and triggers a HWASAN failure due to the initialization
519 // for A not having happened yet
520 //
521 // Even without interposition it is possible to run into similar situations in
522 // cases where two libraries mutually depend on each other.
523 //
524 // We only need one note per binary, so put everything for the note in a
525 // comdat. This needs to be a comdat with an .init_array section to prevent
526 // newer versions of lld from discarding the note.
527 //
528 // Create the note even if we aren't instrumenting globals. This ensures that
529 // binaries linked from object files with both instrumented and
530 // non-instrumented globals will end up with a note, even if a comdat from an
531 // object file with non-instrumented globals is selected. The note is harmless
532 // if the runtime doesn't support it, since it will just be ignored.
533 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
534
535 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
536 auto *Start =
537 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
538 nullptr, "__start_hwasan_globals");
539 Start->setVisibility(GlobalValue::HiddenVisibility);
540 auto *Stop =
541 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
542 nullptr, "__stop_hwasan_globals");
543 Stop->setVisibility(GlobalValue::HiddenVisibility);
544
545 // Null-terminated so actually 8 bytes, which are required in order to align
546 // the note properly.
547 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
548
549 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
550 Int32Ty, Int32Ty);
551 auto *Note =
552 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
553 GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
554 Note->setSection(".note.hwasan.globals");
555 Note->setComdat(NoteComdat);
556 Note->setAlignment(Align(4));
557
558 // The pointers in the note need to be relative so that the note ends up being
559 // placed in rodata, which is the standard location for notes.
560 auto CreateRelPtr = [&](Constant *Ptr) {
561 return ConstantExpr::getTrunc(
562 ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
563 ConstantExpr::getPtrToInt(Note, Int64Ty)),
564 Int32Ty);
565 };
566 Note->setInitializer(ConstantStruct::getAnon(
567 {ConstantInt::get(Int32Ty, 8), // n_namesz
568 ConstantInt::get(Int32Ty, 8), // n_descsz
569 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
570 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
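// The resulting note payload is therefore: n_namesz = 8, n_descsz = 8, the
// NT_LLVM_HWASAN_GLOBALS type, the 8-byte name "LLVM", and two 4-byte
// note-relative pointers bracketing the hwasan_globals section.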
571 appendToCompilerUsed(M, Note);
572
573 // Create a zero-length global in hwasan_globals so that the linker will
574 // always create start and stop symbols.
575 auto *Dummy = new GlobalVariable(
576 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
577 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
578 Dummy->setSection("hwasan_globals");
579 Dummy->setComdat(NoteComdat);
580 Dummy->setMetadata(LLVMContext::MD_associated,
581 MDNode::get(*C, ValueAsMetadata::get(Note)));
582 appendToCompilerUsed(M, Dummy);
583}
584
585/// Module-level initialization.
586///
587/// inserts a call to __hwasan_init to the module's constructor list.
588void HWAddressSanitizer::initializeModule() {
589 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
590 TargetTriple = Triple(M.getTargetTriple());
591
592 // x86_64 currently has two modes:
593 // - Intel LAM (default)
594 // - pointer aliasing (heap only)
595 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
596 UsePageAliases = shouldUsePageAliases(TargetTriple);
597 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
598 InstrumentStack = shouldInstrumentStack(TargetTriple);
599 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
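// Under Intel LAM the usable tag is 6 bits at pointer bits 57..62 (bit 63
// still takes part in the canonical-address check), whereas AArch64 TBI
// provides a full byte at bits 56..63; the shift and mask below reflect that.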
600 PointerTagShift = IsX86_64 ? 57 : 56;
601 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
602
603 Mapping.init(TargetTriple, InstrumentWithCalls);
604
605 C = &(M.getContext());
606 IRBuilder<> IRB(*C);
607
608 HwasanCtorFunction = nullptr;
609
610 // Older versions of Android do not have the required runtime support for
611 // short granules, global or personality function instrumentation. On other
612 // platforms we currently require using the latest version of the runtime.
613 bool NewRuntime =
614 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
615
616 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
617 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
618 TargetTriple.isOSBinFormatELF() &&
619 !optOr(ClInlineAllChecks, Recover);
620
621 // These platforms may prefer less inlining to reduce binary size.
622 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
623 TargetTriple.isOSFuchsia()));
624
625 if (ClMatchAllTag.getNumOccurrences()) {
626 if (ClMatchAllTag != -1) {
627 MatchAllTag = ClMatchAllTag & 0xFF;
628 }
629 } else if (CompileKernel) {
630 MatchAllTag = 0xFF;
631 }
632 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
633
634 // If we don't have personality function support, fall back to landing pads.
635 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
636
637 InstrumentGlobals =
638 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
639
640 if (!CompileKernel) {
641 createHwasanCtorComdat();
642
643 if (InstrumentGlobals)
644 instrumentGlobals();
645
646 bool InstrumentPersonalityFunctions =
647 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
648 if (InstrumentPersonalityFunctions)
649 instrumentPersonalityFunctions();
650 }
651
652 if (!TargetTriple.isAndroid()) {
653 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
654 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
656 "__hwasan_tls", nullptr,
659 return GV;
660 });
661 ThreadPtrGlobal = cast<GlobalVariable>(C);
662 }
663}
664
665void HWAddressSanitizer::initializeCallbacks(Module &M) {
666 IRBuilder<> IRB(*C);
667 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
668 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
669 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
670 *HwasanMemsetFnTy;
671 if (UseMatchAllCallback) {
672 HwasanMemoryAccessCallbackSizedFnTy =
673 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
674 HwasanMemoryAccessCallbackFnTy =
675 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
676 HwasanMemTransferFnTy =
677 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
678 HwasanMemsetFnTy =
679 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
680 } else {
681 HwasanMemoryAccessCallbackSizedFnTy =
682 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
683 HwasanMemoryAccessCallbackFnTy =
684 FunctionType::get(VoidTy, {IntptrTy}, false);
685 HwasanMemTransferFnTy =
686 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
687 HwasanMemsetFnTy =
688 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
689 }
690
691 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
692 const std::string TypeStr = AccessIsWrite ? "store" : "load";
693 const std::string EndingStr = Recover ? "_noabort" : "";
694
695 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
696 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
697 HwasanMemoryAccessCallbackSizedFnTy);
698
699 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
700 AccessSizeIndex++) {
701 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
702 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
703 itostr(1ULL << AccessSizeIndex) +
704 MatchAllStr + EndingStr,
705 HwasanMemoryAccessCallbackFnTy);
706 }
707 }
708
709 const std::string MemIntrinCallbackPrefix =
710 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
711 ? std::string("")
712 : ClMemoryAccessCallbackPrefix;
713
714 HwasanMemmove = M.getOrInsertFunction(
715 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
716 HwasanMemcpy = M.getOrInsertFunction(
717 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
718 HwasanMemset = M.getOrInsertFunction(
719 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
720
721 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
722 PtrTy, Int8Ty, IntptrTy);
723 HwasanGenerateTagFunc =
724 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
725
726 HwasanRecordFrameRecordFunc =
727 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
728
729 ShadowGlobal =
730 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
731
732 HwasanHandleVfork =
733 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
734}
735
736Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
737 // An empty inline asm with input reg == output reg.
738 // An opaque no-op cast, basically.
739 // This prevents code bloat as a result of rematerializing trivial definitions
740 // such as constants or global addresses at every load and store.
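// (The "=r,0" constraint ties the output register to the input, so the asm
// emits no instructions; it only hides the value from the optimizer so the
// shadow base is materialized once instead of per use.)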
741 InlineAsm *Asm =
742 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
743 StringRef(""), StringRef("=r,0"),
744 /*hasSideEffects=*/false);
745 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
746}
747
748Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
749 return getOpaqueNoopCast(IRB, ShadowGlobal);
750}
751
752Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
753 if (Mapping.Offset != kDynamicShadowSentinel)
754 return getOpaqueNoopCast(
755 IRB, ConstantExpr::getIntToPtr(
756 ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));
757
758 if (Mapping.InGlobal)
759 return getDynamicShadowIfunc(IRB);
760
761 Value *GlobalDynamicAddress =
762 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
763 kHwasanShadowMemoryDynamicAddress, Int8Ty);
764 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
765}
766
767bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
768 // Do not instrument accesses from different address spaces; we cannot deal
769 // with them.
770 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
771 if (PtrTy->getPointerAddressSpace() != 0)
772 return true;
773
774 // Ignore swifterror addresses.
775 // swifterror memory addresses are mem2reg promoted by instruction
776 // selection. As such they cannot have regular uses like an instrumentation
777 // function and it makes no sense to track them as memory.
778 if (Ptr->isSwiftError())
779 return true;
780
781 if (findAllocaForValue(Ptr)) {
782 if (!InstrumentStack)
783 return true;
784 if (SSI && SSI->stackAccessIsSafe(*Inst))
785 return true;
786 }
787
788 if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
789 if (!InstrumentGlobals)
790 return true;
791 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
792 }
793
794 return false;
795}
796
797void HWAddressSanitizer::getInterestingMemoryOperands(
798 Instruction *I, const TargetLibraryInfo &TLI,
799 SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
800 // Skip memory accesses inserted by another instrumentation.
801 if (I->hasMetadata(LLVMContext::MD_nosanitize))
802 return;
803
804 // Do not instrument the load fetching the dynamic shadow address.
805 if (ShadowBase == I)
806 return;
807
808 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
809 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
810 return;
811 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
812 LI->getType(), LI->getAlign());
813 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
814 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
815 return;
816 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
817 SI->getValueOperand()->getType(), SI->getAlign());
818 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
819 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
820 return;
821 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
822 RMW->getValOperand()->getType(), std::nullopt);
823 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
824 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
825 return;
826 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
827 XCHG->getCompareOperand()->getType(),
828 std::nullopt);
829 } else if (auto *CI = dyn_cast<CallInst>(I)) {
830 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
831 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
832 ignoreAccess(I, CI->getArgOperand(ArgNo)))
833 continue;
834 Type *Ty = CI->getParamByValType(ArgNo);
835 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
836 }
838 }
839}
840
841 static unsigned getPointerOperandIndex(Instruction *I) {
842 if (LoadInst *LI = dyn_cast<LoadInst>(I))
843 return LI->getPointerOperandIndex();
844 if (StoreInst *SI = dyn_cast<StoreInst>(I))
845 return SI->getPointerOperandIndex();
846 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
847 return RMW->getPointerOperandIndex();
848 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
849 return XCHG->getPointerOperandIndex();
850 report_fatal_error("Unexpected instruction");
851 return -1;
852}
853
854 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
855 size_t Res = llvm::countr_zero(TypeSize / 8);
856 assert(Res < kNumberOfAccessSizes);
857 return Res;
858}
859
860void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
861 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
862 TargetTriple.isRISCV64())
863 return;
864
865 IRBuilder<> IRB(I);
866 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
867 Value *UntaggedPtr =
868 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
869 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
870}
871
872Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
873 // Mem >> Scale
874 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
875 if (Mapping.Offset == 0)
876 return IRB.CreateIntToPtr(Shadow, PtrTy);
877 // (Mem >> Scale) + Offset
878 return IRB.CreatePtrAdd(ShadowBase, Shadow);
879}
880
881int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
882 unsigned AccessSizeIndex) {
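// The result is a small integer encoding of the check parameters that the
// runtime and the outlined/inlined checks agree on; e.g. a recoverable 4-byte
// write packs Recover, IsWrite and an access-size index of 2 (log2(4)) into
// their respective bit fields (example values, not from the source).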
883 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
884 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
885 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
886 (Recover << HWASanAccessInfo::RecoverShift) |
887 (IsWrite << HWASanAccessInfo::IsWriteShift) |
888 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
889}
890
891HWAddressSanitizer::ShadowTagCheckInfo
892HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
893 DomTreeUpdater &DTU, LoopInfo *LI) {
894 ShadowTagCheckInfo R;
895
896 IRBuilder<> IRB(InsertBefore);
897
898 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
899 R.PtrTag =
900 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
901 R.AddrLong = untagPointer(IRB, R.PtrLong);
902 Value *Shadow = memToShadow(R.AddrLong, IRB);
903 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
904 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
905
906 if (MatchAllTag.has_value()) {
907 Value *TagNotIgnored = IRB.CreateICmpNE(
908 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
909 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
910 }
911
912 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
913 TagMismatch, InsertBefore, false,
914 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
915
916 return R;
917}
918
919void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
920 unsigned AccessSizeIndex,
921 Instruction *InsertBefore,
922 DomTreeUpdater &DTU,
923 LoopInfo *LI) {
924 assert(!UsePageAliases);
925 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
926
927 if (InlineFastPath)
928 InsertBefore =
929 insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
930
931 IRBuilder<> IRB(InsertBefore);
933 bool useFixedShadowIntrinsic = false;
934 // The memaccess fixed shadow intrinsic is only supported on AArch64,
935 // which allows a 16-bit immediate to be left-shifted by 32.
936 // Since kShadowBaseAlignment == 32, and Linux by default will not
937 // mmap above 48-bits, practically any valid shadow offset is
938 // representable.
939 // In particular, an offset of 4TB (1024 << 32) is representable, and
940 // ought to be good enough for anybody.
941 if (TargetTriple.isAArch64() && Mapping.Offset != kDynamicShadowSentinel) {
942 uint16_t offset_shifted = Mapping.Offset >> 32;
943 useFixedShadowIntrinsic = (uint64_t)offset_shifted << 32 == Mapping.Offset;
944 }
945
946 if (useFixedShadowIntrinsic)
947 IRB.CreateCall(
949 M, UseShortGranules
950 ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
951 : Intrinsic::hwasan_check_memaccess_fixedshadow),
952 {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
953 ConstantInt::get(Int64Ty, Mapping.Offset)});
954 else
956 M, UseShortGranules
957 ? Intrinsic::hwasan_check_memaccess_shortgranules
958 : Intrinsic::hwasan_check_memaccess),
959 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
960}
961
962void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
963 unsigned AccessSizeIndex,
964 Instruction *InsertBefore,
965 DomTreeUpdater &DTU,
966 LoopInfo *LI) {
967 assert(!UsePageAliases);
968 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
969
970 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
971
972 IRBuilder<> IRB(TCI.TagMismatchTerm);
973 Value *OutOfShortGranuleTagRange =
974 IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
975 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
976 OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
977 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
978
979 IRB.SetInsertPoint(TCI.TagMismatchTerm);
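// Short-granule handling: a memory tag in [1, 15] means only the first MemTag
// bytes of the 16-byte granule are addressable, so the access is in bounds
// only if its last byte (low pointer bits + access size - 1) stays below
// MemTag and the pointer tag matches the real tag stored in the granule's
// last byte.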
980 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
981 PtrLowBits = IRB.CreateAdd(
982 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
983 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
984 SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
985 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU,
986 LI, CheckFailTerm->getParent());
987
988 IRB.SetInsertPoint(TCI.TagMismatchTerm);
989 Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
990 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
991 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
992 Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
993 SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
994 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU,
995 LI, CheckFailTerm->getParent());
996
997 IRB.SetInsertPoint(CheckFailTerm);
998 InlineAsm *Asm;
999 switch (TargetTriple.getArch()) {
1000 case Triple::x86_64:
1001 // The signal handler will find the data address in rdi.
1002 Asm = InlineAsm::get(
1003 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1004 "int3\nnopl " +
1005 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
1006 "(%rax)",
1007 "{rdi}",
1008 /*hasSideEffects=*/true);
1009 break;
1010 case Triple::aarch64:
1011 case Triple::aarch64_be:
1012 // The signal handler will find the data address in x0.
1013 Asm = InlineAsm::get(
1014 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1015 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1016 "{x0}",
1017 /*hasSideEffects=*/true);
1018 break;
1019 case Triple::riscv64:
1020 // The signal handler will find the data address in x10.
1021 Asm = InlineAsm::get(
1022 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1023 "ebreak\naddiw x0, x11, " +
1024 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1025 "{x10}",
1026 /*hasSideEffects=*/true);
1027 break;
1028 default:
1029 report_fatal_error("unsupported architecture");
1030 }
1031 IRB.CreateCall(Asm, TCI.PtrLong);
1032 if (Recover)
1033 cast<BranchInst>(CheckFailTerm)
1034 ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
1035}
1036
1037bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
1038 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1039 return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
1040 (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
1041 }
1042 if (isa<MemSetInst>(MI))
1043 return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
1044 return false;
1045}
1046
1047void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1048 IRBuilder<> IRB(MI);
1049 if (isa<MemTransferInst>(MI)) {
1051 MI->getOperand(0), MI->getOperand(1),
1052 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1053
1054 if (UseMatchAllCallback)
1055 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1056 IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1057 } else if (isa<MemSetInst>(MI)) {
1059 MI->getOperand(0),
1060 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1061 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1062 if (UseMatchAllCallback)
1063 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1064 IRB.CreateCall(HwasanMemset, Args);
1065 }
1066 MI->eraseFromParent();
1067}
1068
1069bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1070 DomTreeUpdater &DTU,
1071 LoopInfo *LI) {
1072 Value *Addr = O.getPtr();
1073
1074 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1075
1076 if (O.MaybeMask)
1077 return false; // FIXME
1078
1079 IRBuilder<> IRB(O.getInsn());
1080 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1081 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1082 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1083 *O.Alignment >= O.TypeStoreSize / 8)) {
1084 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1085 if (InstrumentWithCalls) {
1086 SmallVector<Value *, 4> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
1087 if (UseMatchAllCallback)
1088 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1089 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1090 Args);
1091 } else if (OutlinedChecks) {
1092 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1093 DTU, LI);
1094 } else {
1095 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1096 DTU, LI);
1097 }
1098 } else {
1099 SmallVector<Value *, 4> Args{
1100 IRB.CreatePointerCast(Addr, IntptrTy),
1101 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1102 ConstantInt::get(IntptrTy, 8))};
1103 if (UseMatchAllCallback)
1104 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1105 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1106 }
1107 untagPointerOperand(O.getInsn(), Addr);
1108
1109 return true;
1110}
1111
1112void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1113 size_t Size) {
1114 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1115 if (!UseShortGranules)
1116 Size = AlignedSize;
1117
1118 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1119 if (InstrumentWithCalls) {
1120 IRB.CreateCall(HwasanTagMemoryFunc,
1121 {IRB.CreatePointerCast(AI, PtrTy), Tag,
1122 ConstantInt::get(IntptrTy, AlignedSize)});
1123 } else {
1124 size_t ShadowSize = Size >> Mapping.Scale;
1125 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1126 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1127 // If this memset is not inlined, it will be intercepted in the hwasan
1128 // runtime library. That's OK, because the interceptor skips the checks if
1129 // the address is in the shadow region.
1130 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1131 // llvm.memset right here into either a sequence of stores, or a call to
1132 // hwasan_tag_memory.
1133 if (ShadowSize)
1134 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
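// For a partially used final granule, store the number of valid bytes in its
// shadow byte and stash the real tag in the granule's last byte, matching the
// short-granule check performed on loads and stores.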
1135 if (Size != AlignedSize) {
1136 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1137 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1138 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1139 IRB.CreateStore(
1140 Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1141 AlignedSize - 1));
1142 }
1143 }
1144}
1145
1146unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1147 if (TargetTriple.getArch() == Triple::x86_64)
1148 return AllocaNo & TagMaskByte;
1149
1150 // A list of 8-bit numbers that have at most one run of non-zero bits.
1151 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1152 // masks.
1153 // The list does not include the value 255, which is used for UAR.
1154 //
1155 // Because we are more likely to use earlier elements of this list than later
1156 // ones, it is sorted in increasing order of probability of collision with a
1157 // mask allocated (temporally) nearby. The program that generated this list
1158 // can be found at:
1159 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1160 static const unsigned FastMasks[] = {
1161 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1162 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1163 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1164 return FastMasks[AllocaNo % std::size(FastMasks)];
1165}
1166
1167Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1168 if (TagMaskByte == 0xFF)
1169 return OldTag; // No need to clear the tag byte.
1170 return IRB.CreateAnd(OldTag,
1171 ConstantInt::get(OldTag->getType(), TagMaskByte));
1172}
1173
1174Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1175 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1176}
1177
1178Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1179 if (ClGenerateTagsWithCalls)
1180 return nullptr;
1181 if (StackBaseTag)
1182 return StackBaseTag;
1183 // Extract some entropy from the stack pointer for the tags.
1184 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1185 // between functions).
1186 Value *FramePointerLong = getCachedFP(IRB);
1187 Value *StackTag =
1188 applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
1189 IRB.CreateLShr(FramePointerLong, 20)));
1190 StackTag->setName("hwasan.stack.base.tag");
1191 return StackTag;
1192}
1193
1194Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1195 unsigned AllocaNo) {
1196 if (!StackTag)
1197 return getNextTagWithCall(IRB);
1198 return IRB.CreateXor(
1199 StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1200}
1201
1202Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1203 Value *FramePointerLong = getCachedFP(IRB);
1204 Value *UARTag =
1205 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1206
1207 UARTag->setName("hwasan.uar.tag");
1208 return UARTag;
1209}
1210
1211// Add a tag to an address.
1212Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1213 Value *PtrLong, Value *Tag) {
1214 assert(!UsePageAliases);
1215 Value *TaggedPtrLong;
1216 if (CompileKernel) {
1217 // Kernel addresses have 0xFF in the most significant byte.
1218 Value *ShiftedTag =
1219 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1220 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1221 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1222 } else {
1223 // Userspace can simply do OR (tag << PointerTagShift);
1224 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1225 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1226 }
1227 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1228}
1229
1230// Remove tag from an address.
1231Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1232 assert(!UsePageAliases);
1233 Value *UntaggedPtrLong;
1234 if (CompileKernel) {
1235 // Kernel addresses have 0xFF in the most significant byte.
1236 UntaggedPtrLong =
1237 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1238 TagMaskByte << PointerTagShift));
1239 } else {
1240 // Userspace addresses have 0x00.
1241 UntaggedPtrLong = IRB.CreateAnd(
1242 PtrLong, ConstantInt::get(PtrLong->getType(),
1243 ~(TagMaskByte << PointerTagShift)));
1244 }
1245 return UntaggedPtrLong;
1246}
1247
1248Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1249 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1250 // in Bionic's libc/platform/bionic/tls_defines.h.
1251 constexpr int SanitizerSlot = 6;
1252 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1253 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1254 return ThreadPtrGlobal;
1255}
1256
1257Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1258 if (!CachedFP)
1259 CachedFP = memtag::getFP(IRB);
1260 return CachedFP;
1261}
1262
1263Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1264 // Prepare ring buffer data.
1265 Value *PC = memtag::getPC(TargetTriple, IRB);
1266 Value *FP = getCachedFP(IRB);
1267
1268 // Mix FP and PC.
1269 // Assumptions:
1270 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1271 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1272 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1273 // 0xFFFFPPPPPPPPPPPP
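// A consumer can split a record back apart as PC = Record & ((1 << 48) - 1)
// and FP-low-bits = (Record >> 48) << 4 (a sketch derived from the layout
// described above).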
1274 FP = IRB.CreateShl(FP, 44);
1275 return IRB.CreateOr(PC, FP);
1276}
1277
1278void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1279 if (!Mapping.InTls)
1280 ShadowBase = getShadowNonTls(IRB);
1281 else if (!WithFrameRecord && TargetTriple.isAndroid())
1282 ShadowBase = getDynamicShadowIfunc(IRB);
1283
1284 if (!WithFrameRecord && ShadowBase)
1285 return;
1286
1287 Value *SlotPtr = nullptr;
1288 Value *ThreadLong = nullptr;
1289 Value *ThreadLongMaybeUntagged = nullptr;
1290
1291 auto getThreadLongMaybeUntagged = [&]() {
1292 if (!SlotPtr)
1293 SlotPtr = getHwasanThreadSlotPtr(IRB);
1294 if (!ThreadLong)
1295 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1296 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1297 // TBI.
1298 return TargetTriple.isAArch64() ? ThreadLong
1299 : untagPointer(IRB, ThreadLong);
1300 };
1301
1302 if (WithFrameRecord) {
1303 switch (ClRecordStackHistory) {
1304 case libcall: {
1305 // Emit a runtime call into hwasan rather than emitting instructions for
1306 // recording stack history.
1307 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1308 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1309 break;
1310 }
1311 case instr: {
1312 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1313
1314 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1315
1316 // Store data to ring buffer.
1317 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1318 Value *RecordPtr =
1319 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1320 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1321
1322 // Update the ring buffer. Top byte of ThreadLong defines the size of the
1323 // buffer in pages, it must be a power of two, and the start of the buffer
1324 // must be aligned by twice that much. Therefore wrap around of the ring
1325 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1326 // The use of AShr instead of LShr is due to
1327 // https://bugs.llvm.org/show_bug.cgi?id=39030
1328 // Runtime library makes sure not to use the highest bit.
1329 //
1330 // Mechanical proof of this address calculation can be found at:
1331 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/prove_hwasanwrap.smt2
1332 //
1333 // Example of the wrap case for N = 1
1334 // Pointer: 0x01AAAAAAAAAAAFF8
1335 // +
1336 // 0x0000000000000008
1337 // =
1338 // 0x01AAAAAAAAAAB000
1339 // &
1340 // WrapMask: 0xFFFFFFFFFFFFF000
1341 // =
1342 // 0x01AAAAAAAAAAA000
1343 //
1344 // Then the WrapMask will be a no-op until the next wrap case.
1345 Value *WrapMask = IRB.CreateXor(
1346 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1347 ConstantInt::get(IntptrTy, (uint64_t)-1));
1348 Value *ThreadLongNew = IRB.CreateAnd(
1349 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1350 IRB.CreateStore(ThreadLongNew, SlotPtr);
1351 break;
1352 }
1353 case none: {
1355 "A stack history recording mode should've been selected.");
1356 }
1357 }
1358 }
1359
1360 if (!ShadowBase) {
1361 if (!ThreadLongMaybeUntagged)
1362 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1363
1364 // Get shadow base address by aligning RecordPtr up.
1365 // Note: this is not correct if the pointer is already aligned.
1366 // Runtime library will make sure this never happens.
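// E.g. with kShadowBaseAlignment == 32 this computes (x | 0xFFFFFFFF) + 1,
// i.e. the next 4 GiB boundary above the ring buffer pointer.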
1367 ShadowBase = IRB.CreateAdd(
1368 IRB.CreateOr(
1369 ThreadLongMaybeUntagged,
1370 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1371 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1372 ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1373 }
1374}
1375
1376bool HWAddressSanitizer::instrumentLandingPads(
1377 SmallVectorImpl<Instruction *> &LandingPadVec) {
1378 for (auto *LP : LandingPadVec) {
1379 IRBuilder<> IRB(LP->getNextNonDebugInstruction());
1380 IRB.CreateCall(
1381 HwasanHandleVfork,
1382 {memtag::readRegister(
1383 IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
1384 }
1385 return true;
1386}
1387
1388bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1389 Value *StackTag, Value *UARTag,
1390 const DominatorTree &DT,
1391 const PostDominatorTree &PDT,
1392 const LoopInfo &LI) {
1393 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1394 // alloca addresses using that. Unfortunately, offsets are not known yet
1395 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1396 // temp, shift-OR it into each alloca address and xor with the retag mask.
1397 // This generates one extra instruction per alloca use.
1398 unsigned int I = 0;
1399
1400 for (auto &KV : SInfo.AllocasToInstrument) {
1401 auto N = I++;
1402 auto *AI = KV.first;
1403 memtag::AllocaInfo &Info = KV.second;
1405
1406 // Replace uses of the alloca with tagged address.
1407 Value *Tag = getAllocaTag(IRB, StackTag, N);
1408 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1409 Value *AINoTagLong = untagPointer(IRB, AILong);
1410 Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1411 std::string Name =
1412 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1413 Replacement->setName(Name + ".hwasan");
1414
1415 size_t Size = memtag::getAllocaSizeInBytes(*AI);
1416 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1417
1418 Value *AICast = IRB.CreatePointerCast(AI, PtrTy);
1419
1420 auto HandleLifetime = [&](IntrinsicInst *II) {
1421 // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1422 // set of assumptions we need to make about the lifetime. Without this we
1423 // would need to ensure that we can track the lifetime pointer to a
1424 // constant offset from the alloca, and would still need to change the
1425 // size to include the extra alignment we use for the untagging to make
1426 // the size consistent.
1427 //
1428 // The check for standard lifetime below makes sure that we have exactly
1429 // one set of start / end in any execution (i.e. the ends are not
1430 // reachable from each other), so this will not cause any problems.
1431 II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1432 II->setArgOperand(1, AICast);
1433 };
1434 llvm::for_each(Info.LifetimeStart, HandleLifetime);
1435 llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1436
1437 AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
1438 auto *User = U.getUser();
1439 return User != AILong && User != AICast &&
1441 });
1442
1443 memtag::annotateDebugRecords(Info, retagMask(N));
1444
1445 auto TagEnd = [&](Instruction *Node) {
1446 IRB.SetInsertPoint(Node);
1447 // When untagging, use the `AlignedSize` because we need to set the tags
1448 // for the entire alloca to original. If we used `Size` here, we would
1449 // keep the last granule tagged, and store zero in the last byte of the
1450 // last granule, due to how short granules are implemented.
1451 tagAlloca(IRB, AI, UARTag, AlignedSize);
1452 };
1453 // Calls to functions that may return twice (e.g. setjmp) confuse the
1454 // postdominator analysis, and will leave us to keep memory tagged after
1455 // function return. Work around this by always untagging at every return
1456 // statement if return_twice functions are called.
1457 bool StandardLifetime =
1458 !SInfo.CallsReturnTwice &&
1459 SInfo.UnrecognizedLifetimes.empty() &&
1460 memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1461 &LI, ClMaxLifetimes);
1462 if (DetectUseAfterScope && StandardLifetime) {
1463 IntrinsicInst *Start = Info.LifetimeStart[0];
1464 IRB.SetInsertPoint(Start->getNextNode());
1465 tagAlloca(IRB, AI, Tag, Size);
1466 if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1467 SInfo.RetVec, TagEnd)) {
1468 for (auto *End : Info.LifetimeEnd)
1469 End->eraseFromParent();
1470 }
1471 } else {
1472 tagAlloca(IRB, AI, Tag, Size);
1473 for (auto *RI : SInfo.RetVec)
1474 TagEnd(RI);
1475 // We inserted tagging outside of the lifetimes, so we have to remove
1476 // them.
1477 for (auto &II : Info.LifetimeStart)
1478 II->eraseFromParent();
1479 for (auto &II : Info.LifetimeEnd)
1480 II->eraseFromParent();
1481 }
1482 memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1483 }
1484 for (auto &I : SInfo.UnrecognizedLifetimes)
1485 I->eraseFromParent();
1486 return true;
1487}
1488
1489 static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE,
1490 bool Skip) {
1491 if (Skip) {
1492 ORE.emit([&]() {
1493 return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
1494 << "Skipped: F=" << ore::NV("Function", &F);
1495 });
1496 } else {
1497 ORE.emit([&]() {
1498 return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
1499 << "Sanitized: F=" << ore::NV("Function", &F);
1500 });
1501 }
1502}
1503
1504bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
1505 Function &F, FunctionAnalysisManager &FAM) {
1506 bool Skip = [&]() {
1507 if (ClRandomSkipRate.getNumOccurrences()) {
1508 std::bernoulli_distribution D(ClRandomSkipRate);
1509 return !D(*Rng);
1510 }
1511 if (!ClHotPercentileCutoff.getNumOccurrences())
1512 return false;
1513 auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
1514 ProfileSummaryInfo *PSI =
1515 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
1516 if (!PSI || !PSI->hasProfileSummary()) {
1517 ++NumNoProfileSummaryFuncs;
1518 return false;
1519 }
1520 return PSI->isFunctionHotInCallGraphNthPercentile(
1521 ClHotPercentileCutoff, &F, FAM.getResult<BlockFrequencyAnalysis>(F));
1522 }();
1523 emitRemark(F, FAM.getResult<OptimizationRemarkEmitterAnalysis>(F), Skip);
1524 return Skip;
1525}
1526
1527void HWAddressSanitizer::sanitizeFunction(Function &F,
1528 FunctionAnalysisManager &FAM) {
1529 if (&F == HwasanCtorFunction)
1530 return;
1531
1532 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1533 return;
1534
1535 if (F.empty())
1536 return;
1537
1538 NumTotalFuncs++;
1539
1540 if (selectiveInstrumentationShouldSkip(F, FAM))
1541 return;
1542
1543 NumInstrumentedFuncs++;
1544
1545 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1546
1547 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1548 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1549 SmallVector<Instruction *, 8> LandingPadVec;
1550 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1551
1552 memtag::StackInfoBuilder SIB(SSI);
1553 for (auto &Inst : instructions(F)) {
1554 if (InstrumentStack) {
1555 SIB.visit(Inst);
1556 }
1557
1558 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1559 LandingPadVec.push_back(&Inst);
1560
1561 getInterestingMemoryOperands(&Inst, TLI, OperandsToInstrument);
1562
1563 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1564 if (!ignoreMemIntrinsic(MI))
1565 IntrinToInstrument.push_back(MI);
1566 }
1567
1568 memtag::StackInfo &SInfo = SIB.get();
1569
1570 initializeCallbacks(*F.getParent());
1571
1572 if (!LandingPadVec.empty())
1573 instrumentLandingPads(LandingPadVec);
1574
1575 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1576 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1577 // __hwasan_personality_thunk is a no-op for functions without an
1578 // instrumented stack, so we can drop it.
1579 F.setPersonalityFn(nullptr);
1580 }
1581
1582 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1583 IntrinToInstrument.empty())
1584 return;
1585
1586 assert(!ShadowBase);
1587
1588 BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1589 IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1590 emitPrologue(EntryIRB,
1591 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1592 Mapping.WithFrameRecord &&
1593 !SInfo.AllocasToInstrument.empty());
1594
1595 if (!SInfo.AllocasToInstrument.empty()) {
1596 const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
1597 const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
1598 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1599 Value *StackTag = getStackBaseTag(EntryIRB);
1600 Value *UARTag = getUARTag(EntryIRB);
1601 instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1602 }
1603
1604 // If we split the entry block, move any allocas that were originally in the
1605 // entry block back into the entry block so that they aren't treated as
1606 // dynamic allocas.
1607 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1608 InsertPt = F.getEntryBlock().begin();
1609 for (Instruction &I :
1610 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1611 if (auto *AI = dyn_cast<AllocaInst>(&I))
1612 if (isa<ConstantInt>(AI->getArraySize()))
1613 I.moveBefore(F.getEntryBlock(), InsertPt);
1614 }
1615 }
1616
1617 DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
1618 PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
1619 LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
1620 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1621 for (auto &Operand : OperandsToInstrument)
1622 instrumentMemAccess(Operand, DTU, LI);
1623 DTU.flush();
1624
1625 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1626 for (auto *Inst : IntrinToInstrument)
1627 instrumentMemIntrinsic(Inst);
1628 }
1629
1630 ShadowBase = nullptr;
1631 StackBaseTag = nullptr;
1632 CachedFP = nullptr;
1633}
1634
1635void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1636 assert(!UsePageAliases);
1637 Constant *Initializer = GV->getInitializer();
1638 uint64_t SizeInBytes =
1639 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1640 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1641 if (SizeInBytes != NewSize) {
1642 // Pad the initializer out to the next multiple of 16 bytes and add the
1643 // required short granule tag.
1644 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1645 Init.back() = Tag;
1646 Constant *Padding = ConstantDataArray::get(*C, Init);
1647 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1648 }
1649
1650 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1651 GlobalValue::ExternalLinkage, Initializer,
1652 GV->getName() + ".hwasan");
1653 NewGV->copyAttributesFrom(GV);
1654 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1655 NewGV->copyMetadata(GV, 0);
1656 NewGV->setAlignment(
1657 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1658
1659 // It is invalid to ICF two globals that have different tags. In the case
1660 // where the size of the global is a multiple of the tag granularity the
1661 // contents of the globals may be the same but the tags (i.e. symbol values)
1662 // may be different, and the symbols are not considered during ICF. In the
1663 // case where the size is not a multiple of the granularity, the short granule
1664 // tags would discriminate two globals with different tags, but there would
1665 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1666 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1667 // granule tag in the last byte.
1668 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1669
1670 // Descriptor format (assuming little-endian):
1671 // bytes 0-3: relative address of global
1672 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1673 // it isn't, we create multiple descriptors)
1674 // byte 7: tag
1675 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1676 const uint64_t MaxDescriptorSize = 0xfffff0;
1677 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1678 DescriptorPos += MaxDescriptorSize) {
1679 auto *Descriptor =
1680 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1681 nullptr, GV->getName() + ".hwasan.descriptor");
1682 auto *GVRelPtr = ConstantExpr::getTrunc(
1683 ConstantExpr::getAdd(
1684 ConstantExpr::getSub(
1685 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1686 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1687 ConstantInt::get(Int64Ty, DescriptorPos)),
1688 Int32Ty);
1689 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1690 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1691 Descriptor->setComdat(NewGV->getComdat());
1692 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1693 Descriptor->setSection("hwasan_globals");
1694 Descriptor->setMetadata(LLVMContext::MD_associated,
1695 MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1696 appendToCompilerUsed(M, Descriptor);
1697 }
1698
1699 Constant *Aliasee = ConstantExpr::getIntToPtr(
1700 ConstantExpr::getAdd(
1701 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1702 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1703 GV->getType());
1704 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1705 GV->getLinkage(), "", Aliasee, &M);
1706 Alias->setVisibility(GV->getVisibility());
1707 Alias->takeName(GV);
1708 GV->replaceAllUsesWith(Alias);
1709 GV->eraseFromParent();
1710}
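// --- Illustrative sketch (not part of HWAddressSanitizer.cpp) ---
// A worked example of the two encodings built above, for a hypothetical
// 13-byte global with 16-byte granules and tag 0x2a: the initializer is padded
// to the next granule with the tag stored in the final padding byte (the
// "short granule tag"), and each descriptor packs a size chunk into bytes 4-6
// and the tag into byte 7. All values and names here are made up.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const uint64_t Granule = 16;     // Mapping.getObjectAlignment() is typically 16
  const uint64_t SizeInBytes = 13; // hypothetical global size
  const uint8_t Tag = 0x2a;        // hypothetical HWASan tag

  // Short-granule padding: round up to the granule, zero-fill, tag last byte.
  uint64_t NewSize = (SizeInBytes + Granule - 1) / Granule * Granule; // 16
  std::vector<uint8_t> Padding(NewSize - SizeInBytes, 0);             // 3 bytes
  Padding.back() = Tag;
  assert(Padding.size() == 3 && Padding[2] == 0x2a);

  // Descriptor second word: low 24 bits carry the size chunk, high 8 bits the
  // tag, i.e. "bytes 4-6: size, byte 7: tag" on a little-endian target.
  uint32_t SizeAndTag = uint32_t(SizeInBytes) | (uint32_t(Tag) << 24);
  assert((SizeAndTag & 0x00ffffffu) == 13 && (SizeAndTag >> 24) == 0x2a);
  return 0;
}
// --- end sketch ---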
1711
1712void HWAddressSanitizer::instrumentGlobals() {
1713 std::vector<GlobalVariable *> Globals;
1714 for (GlobalVariable &GV : M.globals()) {
1715 if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
1716 continue;
1717
1718 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1719 GV.isThreadLocal())
1720 continue;
1721
1722 // Common symbols can't have aliases point to them, so they can't be tagged.
1723 if (GV.hasCommonLinkage())
1724 continue;
1725
1726 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1727 // which would be broken both by adding tags and potentially by the extra
1728 // padding/alignment that we insert.
1729 if (GV.hasSection())
1730 continue;
1731
1732 Globals.push_back(&GV);
1733 }
1734
1735 MD5 Hasher;
1736 Hasher.update(M.getSourceFileName());
1737 MD5::MD5Result Hash;
1738 Hasher.final(Hash);
1739 uint8_t Tag = Hash[0];
1740
1741 assert(TagMaskByte >= 16);
1742
1743 for (GlobalVariable *GV : Globals) {
1744 // Don't allow globals to be tagged with something that looks like a
1745 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1746 // the fast path shadow-vs-address check succeeds.
1747 if (Tag < 16 || Tag > TagMaskByte)
1748 Tag = 16;
1749 instrumentGlobal(GV, Tag++);
1750 }
1751}
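// --- Illustrative sketch (not part of HWAddressSanitizer.cpp) ---
// The loop above starts from MD5(source file name)[0] and hands out
// consecutive tags, remapping anything below 16 (which would collide with a
// short-granule tag) or above TagMaskByte back to 16. The snippet below shows
// that behaviour for a made-up TagMaskByte and starting hash byte.
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t TagMaskByte = 0x3f; // hypothetical; the real value is target-dependent
  uint8_t Tag = 0x07;               // pretend Hash[0] was 7

  for (int I = 0; I < 4; ++I) {
    if (Tag < 16 || Tag > TagMaskByte) // same guard as in instrumentGlobals()
      Tag = 16;
    std::printf("global %d gets tag 0x%02x\n", I, Tag); // 0x10, 0x11, 0x12, 0x13
    ++Tag;
  }
  return 0;
}
// --- end sketch ---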
1752
1753void HWAddressSanitizer::instrumentPersonalityFunctions() {
1754 // We need to untag stack frames as we unwind past them. That is the job of
1755 // the personality function wrapper, which either wraps an existing
1756 // personality function or acts as a personality function on its own. Each
1757 // function that has a personality function or that can be unwound past has
1758 // its personality function changed to a thunk that calls the personality
1759 // function wrapper in the runtime.
1760 MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1761 for (Function &F : M) {
1762 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1763 continue;
1764
1765 if (F.hasPersonalityFn()) {
1766 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1767 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1768 PersonalityFns[nullptr].push_back(&F);
1769 }
1770 }
1771
1772 if (PersonalityFns.empty())
1773 return;
1774
1775 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1776 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1777 PtrTy, PtrTy, PtrTy, PtrTy);
1778 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1779 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1780
1781 for (auto &P : PersonalityFns) {
1782 std::string ThunkName = kHwasanPersonalityThunkName;
1783 if (P.first)
1784 ThunkName += ("." + P.first->getName()).str();
1785 FunctionType *ThunkFnTy = FunctionType::get(
1786 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1787 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1788 cast<GlobalValue>(P.first)->hasLocalLinkage());
1789 auto *ThunkFn = Function::Create(ThunkFnTy,
1790 IsLocal ? GlobalValue::InternalLinkage
1791 : GlobalValue::LinkOnceODRLinkage,
1792 ThunkName, &M);
1793 if (!IsLocal) {
1794 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1795 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1796 }
1797
1798 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1799 IRBuilder<> IRB(BB);
1800 CallInst *WrapperCall = IRB.CreateCall(
1801 HwasanPersonalityWrapper,
1802 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1803 ThunkFn->getArg(3), ThunkFn->getArg(4),
1804 P.first ? P.first : Constant::getNullValue(PtrTy),
1805 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1806 WrapperCall->setTailCall();
1807 IRB.CreateRet(WrapperCall);
1808
1809 for (Function *F : P.second)
1810 F->setPersonalityFn(ThunkFn);
1811 }
1812}
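// --- Illustrative sketch (not part of HWAddressSanitizer.cpp) ---
// Conceptually, each thunk generated above behaves like the function below:
// it forwards its five personality arguments, the original personality routine
// (null for the PersonalityFns[nullptr] bucket), and the addresses of the two
// unwinder helpers to the runtime wrapper. The prototypes are approximations
// for illustration only; the authoritative declarations live in compiler-rt
// and libunwind, and the real thunk is emitted directly as IR.
#include <cstdint>

extern "C" int __hwasan_personality_wrapper(int Version, int Actions,
                                            uint64_t ExceptionClass,
                                            void *ExceptionObject,
                                            void *Context,
                                            void *RealPersonality, void *GetGR,
                                            void *GetCFA);
// Declared with empty signatures here, as in the pass above, purely so their
// addresses can be taken and forwarded.
extern "C" void _Unwind_GetGR();
extern "C" void _Unwind_GetCFA();

extern "C" int hwasan_personality_thunk_sketch(int Version, int Actions,
                                               uint64_t ExceptionClass,
                                               void *ExceptionObject,
                                               void *Context) {
  return __hwasan_personality_wrapper(
      Version, Actions, ExceptionClass, ExceptionObject, Context,
      /*RealPersonality=*/nullptr,
      reinterpret_cast<void *>(&_Unwind_GetGR),
      reinterpret_cast<void *>(&_Unwind_GetCFA));
}
// --- end sketch ---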
1813
1814void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1815 bool InstrumentWithCalls) {
1816 Scale = kDefaultShadowScale;
1817 if (TargetTriple.isOSFuchsia()) {
1818 // Fuchsia is always PIE, which means that the beginning of the address
1819 // space is always available.
1820 InGlobal = false;
1821 InTls = false;
1822 Offset = 0;
1823 WithFrameRecord = true;
1824 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1825 InGlobal = false;
1826 InTls = false;
1827 Offset = ClMappingOffset;
1828 WithFrameRecord = false;
1829 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1830 InGlobal = false;
1831 InTls = false;
1832 Offset = 0;
1833 WithFrameRecord = false;
1834 } else if (ClWithIfunc) {
1835 InGlobal = true;
1836 InTls = false;
1837 Offset = kDynamicShadowSentinel;
1838 WithFrameRecord = false;
1839 } else if (ClWithTls) {
1840 InGlobal = false;
1841 InTls = true;
1842 Offset = kDynamicShadowSentinel;
1843 WithFrameRecord = true;
1844 } else {
1845 InGlobal = false;
1846 InTls = false;
1847 Offset = kDynamicShadowSentinel;
1848 WithFrameRecord = false;
1849 }
1850}