1//===- HWAddressSanitizer.cpp - detector of bad memory accesses ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, a basic correctness checker
11/// for memory accesses based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/StringRef.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/Constants.h"
30#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/Dominators.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/IRBuilder.h"
36#include "llvm/IR/InlineAsm.h"
38#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/LLVMContext.h"
43#include "llvm/IR/MDBuilder.h"
44#include "llvm/IR/Module.h"
45#include "llvm/IR/NoFolder.h"
46#include "llvm/IR/Type.h"
47#include "llvm/IR/Value.h"
50#include "llvm/Support/Debug.h"
58#include <optional>
59
60using namespace llvm;
61
62#define DEBUG_TYPE "hwasan"
63
64const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
65const char kHwasanNoteName[] = "hwasan.note";
66const char kHwasanInitName[] = "__hwasan_init";
67const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
68
70 "__hwasan_shadow_memory_dynamic_address";
71
72// Access sizes are powers of two: 1, 2, 4, 8, 16.
73static const size_t kNumberOfAccessSizes = 5;
74
75static const size_t kDefaultShadowScale = 4;
76static const uint64_t kDynamicShadowSentinel =
77 std::numeric_limits<uint64_t>::max();
78
79static const unsigned kShadowBaseAlignment = 32;
80
81static cl::opt<std::string>
82 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
83 cl::desc("Prefix for memory access callbacks"),
84 cl::Hidden, cl::init("__hwasan_"));
85
87 "hwasan-kernel-mem-intrinsic-prefix",
88 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
89 cl::init(false));
90
92 "hwasan-instrument-with-calls",
93 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
94 cl::init(false));
95
96static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
97 cl::desc("instrument read instructions"),
98 cl::Hidden, cl::init(true));
99
100static cl::opt<bool>
101 ClInstrumentWrites("hwasan-instrument-writes",
102 cl::desc("instrument write instructions"), cl::Hidden,
103 cl::init(true));
104
106 "hwasan-instrument-atomics",
107 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
108 cl::init(true));
109
110static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
111 cl::desc("instrument byval arguments"),
112 cl::Hidden, cl::init(true));
113
114static cl::opt<bool>
115 ClRecover("hwasan-recover",
116 cl::desc("Enable recovery mode (continue-after-error)."),
117 cl::Hidden, cl::init(false));
118
119static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
120 cl::desc("instrument stack (allocas)"),
121 cl::Hidden, cl::init(true));
122
123static cl::opt<bool>
124 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
125 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
126 cl::Optional);
127
129 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
131 cl::desc("How many lifetime ends to handle for a single alloca."),
133
134static cl::opt<bool>
135 ClUseAfterScope("hwasan-use-after-scope",
136 cl::desc("detect use after scope within function"),
137 cl::Hidden, cl::init(false));
138
140 "hwasan-uar-retag-to-zero",
141 cl::desc("Clear alloca tags before returning from the function to allow "
142 "non-instrumented and instrumented function calls mix. When set "
143 "to false, allocas are retagged before returning from the "
144 "function to detect use after return."),
145 cl::Hidden, cl::init(true));
146
148 "hwasan-generate-tags-with-calls",
149 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
150 cl::init(false));
151
152static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
153 cl::Hidden, cl::init(false));
154
156 "hwasan-match-all-tag",
157 cl::desc("don't report bad accesses via pointers with this tag"),
158 cl::Hidden, cl::init(-1));
159
160static cl::opt<bool>
161 ClEnableKhwasan("hwasan-kernel",
162 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
163 cl::Hidden, cl::init(false));
164
165// These flags allow to change the shadow mapping and control how shadow memory
166// is accessed. The shadow mapping looks like:
167// Shadow = (Mem >> scale) + offset
168
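// For example, with the default Scale of 4 and an Offset of 0, each shadow
// byte describes one 16-byte granule, so the granule at address 0x1000 is
// described by the shadow byte at address 0x100.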
169static cl::opt<uint64_t>
170 ClMappingOffset("hwasan-mapping-offset",
171 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
172 cl::Hidden, cl::init(0));
173
174static cl::opt<bool>
175 ClWithIfunc("hwasan-with-ifunc",
176 cl::desc("Access dynamic shadow through an ifunc global on "
177 "platforms that support this"),
178 cl::Hidden, cl::init(false));
179
181 "hwasan-with-tls",
182 cl::desc("Access dynamic shadow through an thread-local pointer on "
183 "platforms that support this"),
184 cl::Hidden, cl::init(true));
185
186// Mode for selecting how to insert frame record info into the stack ring
187// buffer.
188enum RecordStackHistoryMode {
189 // Do not record frame record info.
190 none,
191
192 // Insert instructions into the prologue for storing into the stack ring
193 // buffer directly.
194 instr,
195
196 // Add a call to __hwasan_add_frame_record in the runtime.
197 libcall,
198};
199
201 "hwasan-record-stack-history",
202 cl::desc("Record stack frames with tagged allocations in a thread-local "
203 "ring buffer"),
204 cl::values(clEnumVal(none, "Do not record stack ring history"),
205 clEnumVal(instr, "Insert instructions into the prologue for "
206 "storing into the stack ring buffer directly"),
207 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
208 "storing into the stack ring buffer")),
209 cl::Hidden, cl::init(instr));
210
211static cl::opt<bool>
212 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
213 cl::desc("instrument memory intrinsics"),
214 cl::Hidden, cl::init(true));
215
216static cl::opt<bool>
217 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
218 cl::desc("instrument landing pads"), cl::Hidden,
219 cl::init(false));
220
222 "hwasan-use-short-granules",
223 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
224 cl::init(false));
225
227 "hwasan-instrument-personality-functions",
228 cl::desc("instrument personality functions"), cl::Hidden);
229
230static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
231 cl::desc("inline all checks"),
232 cl::Hidden, cl::init(false));
233
234// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
235static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
236 cl::desc("Use page aliasing in HWASan"),
237 cl::Hidden, cl::init(false));
238
239namespace {
240
241bool shouldUsePageAliases(const Triple &TargetTriple) {
242 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
243}
244
245bool shouldInstrumentStack(const Triple &TargetTriple) {
246 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
247}
248
249bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
250 return ClInstrumentWithCalls || TargetTriple.getArch() == Triple::x86_64;
251}
252
253bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
254 return ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
255 : !DisableOptimization;
256}
257
258bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
259 bool DisableOptimization) {
260 return shouldInstrumentStack(TargetTriple) &&
261 mightUseStackSafetyAnalysis(DisableOptimization);
262}
263
264bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
265 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
266}
267
268/// An instrumentation pass implementing detection of addressability bugs
269/// using tagged pointers.
270class HWAddressSanitizer {
271public:
272 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
273 const StackSafetyGlobalInfo *SSI)
274 : M(M), SSI(SSI) {
275 this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
276 this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
277 ? ClEnableKhwasan
278 : CompileKernel;
279
280 initializeModule();
281 }
282
283 void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
284
285 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
286 void initializeModule();
287 void createHwasanCtorComdat();
288
289 void initializeCallbacks(Module &M);
290
291 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
292
293 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
294 Value *getShadowNonTls(IRBuilder<> &IRB);
295
296 void untagPointerOperand(Instruction *I, Value *Addr);
297 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
298
299 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
300 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
301 unsigned AccessSizeIndex,
302 Instruction *InsertBefore);
303 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
304 unsigned AccessSizeIndex,
305 Instruction *InsertBefore);
306 bool ignoreMemIntrinsic(MemIntrinsic *MI);
307 void instrumentMemIntrinsic(MemIntrinsic *MI);
308 bool instrumentMemAccess(InterestingMemoryOperand &O);
309 bool ignoreAccess(Instruction *Inst, Value *Ptr);
310 void getInterestingMemoryOperands(
311 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
312
313 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
314 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
315 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
316 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag,
317 const DominatorTree &DT, const PostDominatorTree &PDT,
318 const LoopInfo &LI);
319 Value *readRegister(IRBuilder<> &IRB, StringRef Name);
320 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
321 Value *getNextTagWithCall(IRBuilder<> &IRB);
322 Value *getStackBaseTag(IRBuilder<> &IRB);
323 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
324 unsigned AllocaNo);
325 Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
326
327 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
328 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
329 unsigned retagMask(unsigned AllocaNo);
330
331 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
332
333 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
334 void instrumentGlobals();
335
336 Value *getPC(IRBuilder<> &IRB);
337 Value *getSP(IRBuilder<> &IRB);
338 Value *getFrameRecordInfo(IRBuilder<> &IRB);
339
340 void instrumentPersonalityFunctions();
341
342private:
343 LLVMContext *C;
344 Module &M;
345 const StackSafetyGlobalInfo *SSI;
346 Triple TargetTriple;
347 FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
348 FunctionCallee HWAsanHandleVfork;
349
350 /// This struct defines the shadow mapping using the rule:
351 /// shadow = (mem >> Scale) + Offset.
352 /// If InGlobal is true, then
353 /// extern char __hwasan_shadow[];
354 /// shadow = (mem >> Scale) + &__hwasan_shadow
355 /// If InTls is true, then
356 /// extern char *__hwasan_tls;
357 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
358 ///
359 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
360 /// ring buffer for storing stack allocations on targets that support it.
361 struct ShadowMapping {
362 uint8_t Scale;
363 uint64_t Offset;
364 bool InGlobal;
365 bool InTls;
366 bool WithFrameRecord;
367
368 void init(Triple &TargetTriple, bool InstrumentWithCalls);
369 Align getObjectAlignment() const { return Align(1ULL << Scale); }
370 };
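  // With the default Scale of 4, getObjectAlignment() is Align(16): allocas
  // and instrumented globals are padded and aligned to whole 16-byte granules
  // so that each object owns complete shadow bytes.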
371
372 ShadowMapping Mapping;
373
374 Type *VoidTy = Type::getVoidTy(M.getContext());
375 Type *IntptrTy;
376 Type *Int8PtrTy;
377 Type *Int8Ty;
378 Type *Int32Ty;
379 Type *Int64Ty = Type::getInt64Ty(M.getContext());
380
381 bool CompileKernel;
382 bool Recover;
383 bool OutlinedChecks;
384 bool UseShortGranules;
385 bool InstrumentLandingPads;
386 bool InstrumentWithCalls;
387 bool InstrumentStack;
388 bool DetectUseAfterScope;
389 bool UsePageAliases;
390
391 std::optional<uint8_t> MatchAllTag;
392
393 unsigned PointerTagShift;
394 uint64_t TagMaskByte;
395
396 Function *HwasanCtorFunction;
397
398 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
399 FunctionCallee HwasanMemoryAccessCallbackSized[2];
400
401 FunctionCallee HwasanTagMemoryFunc;
402 FunctionCallee HwasanGenerateTagFunc;
403 FunctionCallee HwasanRecordFrameRecordFunc;
404
405 Constant *ShadowGlobal;
406
407 Value *ShadowBase = nullptr;
408 Value *StackBaseTag = nullptr;
409 Value *CachedSP = nullptr;
410 GlobalValue *ThreadPtrGlobal = nullptr;
411};
412
413} // end anonymous namespace
414
415PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
416 ModuleAnalysisManager &MAM) {
417 const StackSafetyGlobalInfo *SSI = nullptr;
418 auto TargetTriple = llvm::Triple(M.getTargetTriple());
419 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
420 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
421
422 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
423 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
424 for (Function &F : M)
425 HWASan.sanitizeFunction(F, FAM);
426
427 PreservedAnalyses PA = PreservedAnalyses::none();
428 // GlobalsAA is considered stateless and does not get invalidated unless
429 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
430 // make changes that require GlobalsAA to be invalidated.
431 PA.abandon<GlobalsAA>();
432 return PA;
433}
434void HWAddressSanitizerPass::printPipeline(
435 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
436 static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
437 OS, MapClassName2PassName);
438 OS << '<';
439 if (Options.CompileKernel)
440 OS << "kernel;";
441 if (Options.Recover)
442 OS << "recover";
443 OS << '>';
444}
445
446void HWAddressSanitizer::createHwasanCtorComdat() {
447 std::tie(HwasanCtorFunction, std::ignore) =
448 getOrCreateSanitizerCtorAndInitFunctions(
449 M, kHwasanModuleCtorName, kHwasanInitName,
450 /*InitArgTypes=*/{},
451 /*InitArgs=*/{},
452 // This callback is invoked when the functions are created the first
453 // time. Hook them into the global ctors list in that case:
454 [&](Function *Ctor, FunctionCallee) {
455 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
456 Ctor->setComdat(CtorComdat);
457 appendToGlobalCtors(M, Ctor, 0, Ctor);
458 });
459
460 // Create a note that contains pointers to the list of global
461 // descriptors. Adding a note to the output file will cause the linker to
462 // create a PT_NOTE program header pointing to the note that we can use to
463 // find the descriptor list starting from the program headers. A function
464 // provided by the runtime initializes the shadow memory for the globals by
465 // accessing the descriptor list via the note. The dynamic loader needs to
466 // call this function whenever a library is loaded.
467 //
468 // The reason why we use a note for this instead of a more conventional
469 // approach of having a global constructor pass a descriptor list pointer to
470 // the runtime is because of an order of initialization problem. With
471 // constructors we can encounter the following problematic scenario:
472 //
473 // 1) library A depends on library B and also interposes one of B's symbols
474 // 2) B's constructors are called before A's (as required for correctness)
475 // 3) during construction, B accesses one of its "own" globals (actually
476 // interposed by A) and triggers a HWASAN failure due to the initialization
477 // for A not having happened yet
478 //
479 // Even without interposition it is possible to run into similar situations in
480 // cases where two libraries mutually depend on each other.
481 //
482 // We only need one note per binary, so put everything for the note in a
483 // comdat. This needs to be a comdat with an .init_array section to prevent
484 // newer versions of lld from discarding the note.
485 //
486 // Create the note even if we aren't instrumenting globals. This ensures that
487 // binaries linked from object files with both instrumented and
488 // non-instrumented globals will end up with a note, even if a comdat from an
489 // object file with non-instrumented globals is selected. The note is harmless
490 // if the runtime doesn't support it, since it will just be ignored.
491 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
492
493 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
494 auto *Start =
495 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
496 nullptr, "__start_hwasan_globals");
497 Start->setVisibility(GlobalValue::HiddenVisibility);
498 auto *Stop =
499 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
500 nullptr, "__stop_hwasan_globals");
501 Stop->setVisibility(GlobalValue::HiddenVisibility);
502
503 // Null-terminated so actually 8 bytes, which are required in order to align
504 // the note properly.
505 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
506
507 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
508 Int32Ty, Int32Ty);
509 auto *Note =
510 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
511 GlobalValue::PrivateLinkage, nullptr);
512 Note->setSection(".note.hwasan.globals");
513 Note->setComdat(NoteComdat);
514 Note->setAlignment(Align(4));
515
516 // The pointers in the note need to be relative so that the note ends up being
517 // placed in rodata, which is the standard location for notes.
518 auto CreateRelPtr = [&](Constant *Ptr) {
519 return ConstantExpr::getTrunc(
520 ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
521 ConstantExpr::getPtrToInt(Note, Int64Ty)),
522 Int32Ty);
523 };
524 Note->setInitializer(ConstantStruct::getAnon(
525 {ConstantInt::get(Int32Ty, 8), // n_namesz
526 ConstantInt::get(Int32Ty, 8), // n_descsz
527 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
528 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
529 appendToCompilerUsed(M, Note);
530
531 // Create a zero-length global in hwasan_globals so that the linker will
532 // always create start and stop symbols.
533 auto *Dummy = new GlobalVariable(
534 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
535 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
536 Dummy->setSection("hwasan_globals");
537 Dummy->setComdat(NoteComdat);
538 Dummy->setMetadata(LLVMContext::MD_associated,
539 MDNode::get(*C, ValueAsMetadata::get(Note)));
540 appendToCompilerUsed(M, Dummy);
541}
542
543/// Module-level initialization.
544///
545/// inserts a call to __hwasan_init to the module's constructor list.
546void HWAddressSanitizer::initializeModule() {
547 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
548 auto &DL = M.getDataLayout();
549
550 TargetTriple = Triple(M.getTargetTriple());
551
552 // x86_64 currently has two modes:
553 // - Intel LAM (default)
554 // - pointer aliasing (heap only)
555 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
556 UsePageAliases = shouldUsePageAliases(TargetTriple);
557 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
558 InstrumentStack = shouldInstrumentStack(TargetTriple);
559 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
560 PointerTagShift = IsX86_64 ? 57 : 56;
561 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
562
563 Mapping.init(TargetTriple, InstrumentWithCalls);
564
565 C = &(M.getContext());
566 IRBuilder<> IRB(*C);
567 IntptrTy = IRB.getIntPtrTy(DL);
568 Int8PtrTy = IRB.getInt8PtrTy();
569 Int8Ty = IRB.getInt8Ty();
570 Int32Ty = IRB.getInt32Ty();
571
572 HwasanCtorFunction = nullptr;
573
574 // Older versions of Android do not have the required runtime support for
575 // short granules, global or personality function instrumentation. On other
576 // platforms we currently require using the latest version of the runtime.
577 bool NewRuntime =
578 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
579
580 UseShortGranules =
581 ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
582 OutlinedChecks =
583 (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
584 TargetTriple.isOSBinFormatELF() &&
585 (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
586
587 if (ClMatchAllTag.getNumOccurrences()) {
588 if (ClMatchAllTag != -1) {
589 MatchAllTag = ClMatchAllTag & 0xFF;
590 }
591 } else if (CompileKernel) {
592 MatchAllTag = 0xFF;
593 }
594
595 // If we don't have personality function support, fall back to landing pads.
596 InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
597 ? ClInstrumentLandingPads
598 : !NewRuntime;
599
600 if (!CompileKernel) {
601 createHwasanCtorComdat();
602 bool InstrumentGlobals =
603 ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
604
605 if (InstrumentGlobals && !UsePageAliases)
606 instrumentGlobals();
607
608 bool InstrumentPersonalityFunctions =
609 ClInstrumentPersonalityFunctions.getNumOccurrences()
610 ? ClInstrumentPersonalityFunctions
611 : NewRuntime;
612 if (InstrumentPersonalityFunctions)
613 instrumentPersonalityFunctions();
614 }
615
616 if (!TargetTriple.isAndroid()) {
617 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
618 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
620 "__hwasan_tls", nullptr,
623 return GV;
624 });
625 ThreadPtrGlobal = cast<GlobalVariable>(C);
626 }
627}
628
629void HWAddressSanitizer::initializeCallbacks(Module &M) {
630 IRBuilder<> IRB(*C);
631 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
632 const std::string TypeStr = AccessIsWrite ? "store" : "load";
633 const std::string EndingStr = Recover ? "_noabort" : "";
634
635 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
636 ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
637 FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));
638
639 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
640 AccessSizeIndex++) {
641 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
642 M.getOrInsertFunction(
643 ClMemoryAccessCallbackPrefix + TypeStr +
644 itostr(1ULL << AccessSizeIndex) + EndingStr,
645 FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
646 }
647 }
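// The loops above register the runtime check callbacks using
// ClMemoryAccessCallbackPrefix ("__hwasan_" by default): the sized variants
// such as "__hwasan_loadN"/"__hwasan_storeN" and the fixed-size ones such as
// "__hwasan_load4" or "__hwasan_store16", with a "_noabort" suffix appended
// in recover mode.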
648
649 HwasanTagMemoryFunc = M.getOrInsertFunction(
650 "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
651 HwasanGenerateTagFunc =
652 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
653
654 HwasanRecordFrameRecordFunc = M.getOrInsertFunction(
655 "__hwasan_add_frame_record", IRB.getVoidTy(), Int64Ty);
656
657 ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
658 ArrayType::get(IRB.getInt8Ty(), 0));
659
660 const std::string MemIntrinCallbackPrefix =
661 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
662 ? std::string("")
663 : ClMemoryAccessCallbackPrefix;
664 HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
665 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
666 IRB.getInt8PtrTy(), IntptrTy);
667 HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
668 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
669 IRB.getInt8PtrTy(), IntptrTy);
670 HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
671 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
672 IRB.getInt32Ty(), IntptrTy);
673
674 HWAsanHandleVfork =
675 M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
676}
677
678Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
679 // An empty inline asm with input reg == output reg.
680 // An opaque no-op cast, basically.
681 // This prevents code bloat as a result of rematerializing trivial definitions
682 // such as constants or global addresses at every load and store.
683 InlineAsm *Asm =
684 InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
685 StringRef(""), StringRef("=r,0"),
686 /*hasSideEffects=*/false);
687 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
688}
689
690Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
691 return getOpaqueNoopCast(IRB, ShadowGlobal);
692}
693
694Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
695 if (Mapping.Offset != kDynamicShadowSentinel)
696 return getOpaqueNoopCast(
697 IRB, ConstantExpr::getIntToPtr(
698 ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
699
700 if (Mapping.InGlobal)
701 return getDynamicShadowIfunc(IRB);
702
703 Value *GlobalDynamicAddress =
704 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
705 kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
706 return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
707}
708
709bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
710 // Do not instrument accesses from different address spaces; we cannot deal
711 // with them.
712 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
713 if (PtrTy->getPointerAddressSpace() != 0)
714 return true;
715
716 // Ignore swifterror addresses.
717 // swifterror memory addresses are mem2reg promoted by instruction
718 // selection. As such they cannot have regular uses like an instrumentation
719 // function and it makes no sense to track them as memory.
720 if (Ptr->isSwiftError())
721 return true;
722
723 if (findAllocaForValue(Ptr)) {
724 if (!InstrumentStack)
725 return true;
726 if (SSI && SSI->stackAccessIsSafe(*Inst))
727 return true;
728 }
729 return false;
730}
731
732void HWAddressSanitizer::getInterestingMemoryOperands(
733 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
734 // Skip memory accesses inserted by another instrumentation.
735 if (I->hasMetadata(LLVMContext::MD_nosanitize))
736 return;
737
738 // Do not instrument the load fetching the dynamic shadow address.
739 if (ShadowBase == I)
740 return;
741
742 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
743 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
744 return;
745 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
746 LI->getType(), LI->getAlign());
747 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
748 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
749 return;
750 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
751 SI->getValueOperand()->getType(), SI->getAlign());
752 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
753 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
754 return;
755 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
756 RMW->getValOperand()->getType(), std::nullopt);
757 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
758 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
759 return;
760 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
761 XCHG->getCompareOperand()->getType(),
762 std::nullopt);
763 } else if (auto *CI = dyn_cast<CallInst>(I)) {
764 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
765 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
766 ignoreAccess(I, CI->getArgOperand(ArgNo)))
767 continue;
768 Type *Ty = CI->getParamByValType(ArgNo);
769 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
770 }
771 }
772}
773
774static unsigned getPointerOperandIndex(Instruction *I) {
775 if (LoadInst *LI = dyn_cast<LoadInst>(I))
776 return LI->getPointerOperandIndex();
777 if (StoreInst *SI = dyn_cast<StoreInst>(I))
778 return SI->getPointerOperandIndex();
779 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
780 return RMW->getPointerOperandIndex();
781 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
782 return XCHG->getPointerOperandIndex();
783 report_fatal_error("Unexpected instruction");
784 return -1;
785}
786
787static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
788 size_t Res = llvm::countr_zero(TypeSize / 8);
789 assert(Res < kNumberOfAccessSizes);
790 return Res;
791}
792
793void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
794 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
795 TargetTriple.isRISCV64())
796 return;
797
798 IRBuilder<> IRB(I);
799 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
800 Value *UntaggedPtr =
801 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
802 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
803}
804
805Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
806 // Mem >> Scale
807 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
808 if (Mapping.Offset == 0)
809 return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
810 // (Mem >> Scale) + Offset
811 return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
812}
813
814int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
815 unsigned AccessSizeIndex) {
816 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
817 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
818 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
819 (Recover << HWASanAccessInfo::RecoverShift) |
820 (IsWrite << HWASanAccessInfo::IsWriteShift) |
821 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
822}
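// The returned value packs all parameters of a check (kernel mode, the
// optional match-all tag, recover mode, read vs. write, and the log2 of the
// access size) into one integer; it is passed to the outlined check intrinsic
// and, masked with RuntimeMask, encoded into the trap immediate of the inline
// check so the runtime can decode which kind of access failed.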
823
824void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
825 unsigned AccessSizeIndex,
826 Instruction *InsertBefore) {
827 assert(!UsePageAliases);
828 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
829 IRBuilder<> IRB(InsertBefore);
830 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
831 Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
832 IRB.CreateCall(Intrinsic::getDeclaration(
833 M, UseShortGranules
834 ? Intrinsic::hwasan_check_memaccess_shortgranules
835 : Intrinsic::hwasan_check_memaccess),
836 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
837}
838
839void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
840 unsigned AccessSizeIndex,
841 Instruction *InsertBefore) {
842 assert(!UsePageAliases);
843 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
844 IRBuilder<> IRB(InsertBefore);
845
846 Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
847 Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, PointerTagShift),
848 IRB.getInt8Ty());
849 Value *AddrLong = untagPointer(IRB, PtrLong);
850 Value *Shadow = memToShadow(AddrLong, IRB);
851 Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
852 Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
853
854 if (MatchAllTag.has_value()) {
855 Value *TagNotIgnored = IRB.CreateICmpNE(
856 PtrTag, ConstantInt::get(PtrTag->getType(), *MatchAllTag));
857 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
858 }
859
860 Instruction *CheckTerm =
861 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
862 MDBuilder(*C).createBranchWeights(1, 100000));
863
864 IRB.SetInsertPoint(CheckTerm);
865 Value *OutOfShortGranuleTagRange =
866 IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
867 Instruction *CheckFailTerm =
868 SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
869 MDBuilder(*C).createBranchWeights(1, 100000));
870
871 IRB.SetInsertPoint(CheckTerm);
872 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
873 PtrLowBits = IRB.CreateAdd(
874 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
875 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
876 SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
877 MDBuilder(*C).createBranchWeights(1, 100000),
878 (DomTreeUpdater *)nullptr, nullptr,
879 CheckFailTerm->getParent());
880
881 IRB.SetInsertPoint(CheckTerm);
882 Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
883 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
884 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
885 Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
886 SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
887 MDBuilder(*C).createBranchWeights(1, 100000),
888 (DomTreeUpdater *)nullptr, nullptr,
889 CheckFailTerm->getParent());
890
891 IRB.SetInsertPoint(CheckFailTerm);
892 InlineAsm *Asm;
893 switch (TargetTriple.getArch()) {
894 case Triple::x86_64:
895 // The signal handler will find the data address in rdi.
896 Asm = InlineAsm::get(
897 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
898 "int3\nnopl " +
899 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
900 "(%rax)",
901 "{rdi}",
902 /*hasSideEffects=*/true);
903 break;
904 case Triple::aarch64:
906 // The signal handler will find the data address in x0.
907 Asm = InlineAsm::get(
908 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
909 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
910 "{x0}",
911 /*hasSideEffects=*/true);
912 break;
913 case Triple::riscv64:
914 // The signal handler will find the data address in x10.
915 Asm = InlineAsm::get(
916 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
917 "ebreak\naddiw x0, x11, " +
918 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
919 "{x10}",
920 /*hasSideEffects=*/true);
921 break;
922 default:
923 report_fatal_error("unsupported architecture");
924 }
925 IRB.CreateCall(Asm, PtrLong);
926 if (Recover)
927 cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
928}
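// Example of the short-granule path above: a shadow byte of 5 means only the
// first 5 bytes of that 16-byte granule are addressable and the real tag is
// kept in the granule's last byte. For a 2-byte access at offset 3,
// PtrLowBits becomes 3 + (2 - 1) = 4, which is below 5, so the bounds check
// passes and only the inline tag comparison remains.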
929
930bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
931 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
932 return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
933 (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
934 }
935 if (isa<MemSetInst>(MI))
936 return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
937 return false;
938}
939
940void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
941 IRBuilder<> IRB(MI);
942 if (isa<MemTransferInst>(MI)) {
943 IRB.CreateCall(
944 isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
945 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
946 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
947 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
948 } else if (isa<MemSetInst>(MI)) {
949 IRB.CreateCall(
950 HWAsanMemset,
951 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
952 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
953 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
954 }
955 MI->eraseFromParent();
956}
957
958bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
959 Value *Addr = O.getPtr();
960
961 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
962
963 if (O.MaybeMask)
964 return false; // FIXME
965
966 IRBuilder<> IRB(O.getInsn());
967 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
968 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
969 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
970 *O.Alignment >= O.TypeStoreSize / 8)) {
971 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
972 if (InstrumentWithCalls) {
973 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
974 IRB.CreatePointerCast(Addr, IntptrTy));
975 } else if (OutlinedChecks) {
976 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
977 } else {
978 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
979 }
980 } else {
981 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
982 {IRB.CreatePointerCast(Addr, IntptrTy),
983 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy,
984 O.TypeStoreSize),
985 ConstantInt::get(IntptrTy, 8))});
986 }
987 untagPointerOperand(O.getInsn(), Addr);
988
989 return true;
990}
991
992void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
993 size_t Size) {
994 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
995 if (!UseShortGranules)
996 Size = AlignedSize;
997
998 Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
999 if (InstrumentWithCalls) {
1000 IRB.CreateCall(HwasanTagMemoryFunc,
1001 {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
1002 ConstantInt::get(IntptrTy, AlignedSize)});
1003 } else {
1004 size_t ShadowSize = Size >> Mapping.Scale;
1005 Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
1006 // If this memset is not inlined, it will be intercepted in the hwasan
1007 // runtime library. That's OK, because the interceptor skips the checks if
1008 // the address is in the shadow region.
1009 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1010 // llvm.memset right here into either a sequence of stores, or a call to
1011 // hwasan_tag_memory.
1012 if (ShadowSize)
1013 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1));
1014 if (Size != AlignedSize) {
1015 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1016 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1017 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1018 IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
1019 Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
1020 AlignedSize - 1));
1021 }
1022 }
1023}
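// Example with the default 16-byte granules and short granules enabled:
// tagging a 20-byte alloca writes the tag into one full shadow byte
// (20 >> 4 == 1), stores the remainder 4 into the shadow byte of the partial
// granule, and stores the real tag into byte 31 of the alloca (the last byte
// of the padded 32-byte object).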
1024
1025unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1026 if (TargetTriple.getArch() == Triple::x86_64)
1027 return AllocaNo & TagMaskByte;
1028
1029 // A list of 8-bit numbers that have at most one run of non-zero bits.
1030 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1031 // masks.
1032 // The list does not include the value 255, which is used for UAR.
1033 //
1034 // Because we are more likely to use earlier elements of this list than later
1035 // ones, it is sorted in increasing order of probability of collision with a
1036 // mask allocated (temporally) nearby. The program that generated this list
1037 // can be found at:
1038 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1039 static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240,
1040 48, 16, 120, 248, 56, 24, 8, 124, 252,
1041 60, 28, 12, 4, 126, 254, 62, 30, 14,
1042 6, 2, 127, 63, 31, 15, 7, 3, 1};
1043 return FastMasks[AllocaNo % std::size(FastMasks)];
1044}
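// Example: allocas 0, 1 and 2 in a function get masks 0, 128 and 64. Each
// alloca's tag is the stack base tag xor-ed with its mask, so neighboring
// allocas get distinct tags while the xor stays encodable as a single
// AArch64 instruction, as described above.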
1045
1046Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1047 if (TargetTriple.getArch() == Triple::x86_64) {
1048 Constant *TagMask = ConstantInt::get(IntptrTy, TagMaskByte);
1049 Value *NewTag = IRB.CreateAnd(OldTag, TagMask);
1050 return NewTag;
1051 }
1052 // aarch64 uses 8-bit tags, so no mask is needed.
1053 return OldTag;
1054}
1055
1056Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1057 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1058}
1059
1060Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1061 if (ClGenerateTagsWithCalls)
1062 return getNextTagWithCall(IRB);
1063 if (StackBaseTag)
1064 return StackBaseTag;
1065 // Extract some entropy from the stack pointer for the tags.
1066 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1067 // between functions).
1068 Value *StackPointerLong = getSP(IRB);
1069 Value *StackTag =
1070 applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1071 IRB.CreateLShr(StackPointerLong, 20)));
1072 StackTag->setName("hwasan.stack.base.tag");
1073 return StackTag;
1074}
1075
1076Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1077 AllocaInst *AI, unsigned AllocaNo) {
1078 if (ClGenerateTagsWithCalls)
1079 return getNextTagWithCall(IRB);
1080 return IRB.CreateXor(StackTag,
1081 ConstantInt::get(IntptrTy, retagMask(AllocaNo)));
1082}
1083
1084Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
1085 if (ClUARRetagToZero)
1086 return ConstantInt::get(IntptrTy, 0);
1087 if (ClGenerateTagsWithCalls)
1088 return getNextTagWithCall(IRB);
1089 return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, TagMaskByte));
1090}
1091
1092// Add a tag to an address.
1093Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1094 Value *PtrLong, Value *Tag) {
1095 assert(!UsePageAliases);
1096 Value *TaggedPtrLong;
1097 if (CompileKernel) {
1098 // Kernel addresses have 0xFF in the most significant byte.
1099 Value *ShiftedTag =
1100 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1101 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1102 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1103 } else {
1104 // Userspace can simply do OR (tag << PointerTagShift);
1105 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1106 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1107 }
1108 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1109}
1110
1111// Remove tag from an address.
1112Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1113 assert(!UsePageAliases);
1114 Value *UntaggedPtrLong;
1115 if (CompileKernel) {
1116 // Kernel addresses have 0xFF in the most significant byte.
1117 UntaggedPtrLong =
1118 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1119 0xFFULL << PointerTagShift));
1120 } else {
1121 // Userspace addresses have 0x00.
1122 UntaggedPtrLong =
1123 IRB.CreateAnd(PtrLong, ConstantInt::get(PtrLong->getType(),
1124 ~(0xFFULL << PointerTagShift)));
1125 }
1126 return UntaggedPtrLong;
1127}
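// Example: on AArch64 (PointerTagShift == 56) untagging clears the whole top
// byte of a userspace pointer, while for kernel pointers it forces the top
// byte back to 0xFF, the canonical kernel address form.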
1128
1129Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1130 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1131 if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1132 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1133 // in Bionic's libc/private/bionic_tls.h.
1134 Function *ThreadPointerFunc =
1135 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1136 Value *SlotPtr = IRB.CreatePointerCast(
1137 IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
1138 IRB.CreateCall(ThreadPointerFunc), 0x30),
1139 Ty->getPointerTo(0));
1140 return SlotPtr;
1141 }
1142 if (ThreadPtrGlobal)
1143 return ThreadPtrGlobal;
1144
1145 return nullptr;
1146}
1147
1148Value *HWAddressSanitizer::getPC(IRBuilder<> &IRB) {
1149 if (TargetTriple.getArch() == Triple::aarch64)
1150 return readRegister(IRB, "pc");
1151 return IRB.CreatePtrToInt(IRB.GetInsertBlock()->getParent(), IntptrTy);
1152}
1153
1154Value *HWAddressSanitizer::getSP(IRBuilder<> &IRB) {
1155 if (!CachedSP) {
1156 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
1157 // first).
1158 Function *F = IRB.GetInsertBlock()->getParent();
1159 Module *M = F->getParent();
1160 auto *GetStackPointerFn = Intrinsic::getDeclaration(
1161 M, Intrinsic::frameaddress,
1162 IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1163 CachedSP = IRB.CreatePtrToInt(
1164 IRB.CreateCall(GetStackPointerFn,
1165 {Constant::getNullValue(IRB.getInt32Ty())}),
1166 IntptrTy);
1167 }
1168 return CachedSP;
1169}
1170
1171Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1172 // Prepare ring buffer data.
1173 Value *PC = getPC(IRB);
1174 Value *SP = getSP(IRB);
1175
1176 // Mix SP and PC.
1177 // Assumptions:
1178 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1179 // SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
1180 // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
1181 // 0xSSSSPPPPPPPPPPPP
1182 SP = IRB.CreateShl(SP, 44);
1183 return IRB.CreateOr(PC, SP);
1184}
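// Example: because the low 4 bits of SP are zero, (SP << 44) only populates
// the top 16 bits of the record, so the result is 0xSSSSPPPPPPPPPPPP: bits
// 4..19 of SP packed above the full 48-bit PC.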
1185
1186void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1187 if (!Mapping.InTls)
1188 ShadowBase = getShadowNonTls(IRB);
1189 else if (!WithFrameRecord && TargetTriple.isAndroid())
1190 ShadowBase = getDynamicShadowIfunc(IRB);
1191
1192 if (!WithFrameRecord && ShadowBase)
1193 return;
1194
1195 Value *SlotPtr = nullptr;
1196 Value *ThreadLong = nullptr;
1197 Value *ThreadLongMaybeUntagged = nullptr;
1198
1199 auto getThreadLongMaybeUntagged = [&]() {
1200 if (!SlotPtr)
1201 SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
1202 if (!ThreadLong)
1203 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1204 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1205 // TBI.
1206 return TargetTriple.isAArch64() ? ThreadLong
1207 : untagPointer(IRB, ThreadLong);
1208 };
1209
1210 if (WithFrameRecord) {
1211 switch (ClRecordStackHistory) {
1212 case libcall: {
1213 // Emit a runtime call into hwasan rather than emitting instructions for
1214 // recording stack history.
1215 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1216 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1217 break;
1218 }
1219 case instr: {
1220 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1221
1222 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1223
1224 // Store data to ring buffer.
1225 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1226 Value *RecordPtr = IRB.CreateIntToPtr(ThreadLongMaybeUntagged,
1227 IntptrTy->getPointerTo(0));
1228 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1229
1230 // Update the ring buffer. Top byte of ThreadLong defines the size of the
1231 // buffer in pages, it must be a power of two, and the start of the buffer
1232 // must be aligned by twice that much. Therefore wrap around of the ring
1233 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1234 // The use of AShr instead of LShr is due to
1235 // https://bugs.llvm.org/show_bug.cgi?id=39030
1236 // Runtime library makes sure not to use the highest bit.
1237 Value *WrapMask = IRB.CreateXor(
1238 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1239 ConstantInt::get(IntptrTy, (uint64_t)-1));
1240 Value *ThreadLongNew = IRB.CreateAnd(
1241 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1242 IRB.CreateStore(ThreadLongNew, SlotPtr);
1243 break;
1244 }
1245 case none: {
1247 "A stack history recording mode should've been selected.");
1248 }
1249 }
1250 }
1251
1252 if (!ShadowBase) {
1253 if (!ThreadLongMaybeUntagged)
1254 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1255
1256 // Get shadow base address by aligning RecordPtr up.
1257 // Note: this is not correct if the pointer is already aligned.
1258 // Runtime library will make sure this never happens.
1259 ShadowBase = IRB.CreateAdd(
1260 IRB.CreateOr(
1261 ThreadLongMaybeUntagged,
1262 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1263 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1264 ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
1265 }
1266}
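// The OR-then-add at the end rounds ThreadLongMaybeUntagged up to the next
// multiple of 2^kShadowBaseAlignment (2^32): x | (2^32 - 1) sets the low 32
// bits and adding 1 carries into bit 32, which is why an already-aligned
// value would overshoot by 2^32, as noted above.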
1267
1268Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1269 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1270 Function *ReadRegister =
1271 Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1272 MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1273 Value *Args[] = {MetadataAsValue::get(*C, MD)};
1274 return IRB.CreateCall(ReadRegister, Args);
1275}
1276
1277bool HWAddressSanitizer::instrumentLandingPads(
1278 SmallVectorImpl<Instruction *> &LandingPadVec) {
1279 for (auto *LP : LandingPadVec) {
1280 IRBuilder<> IRB(LP->getNextNode());
1281 IRB.CreateCall(
1282 HWAsanHandleVfork,
1283 {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1284 : "sp")});
1285 }
1286 return true;
1287}
1288
1289static bool isLifetimeIntrinsic(Value *V) {
1290 auto *II = dyn_cast<IntrinsicInst>(V);
1291 return II && II->isLifetimeStartOrEnd();
1292}
1293
1294bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1295 Value *StackTag,
1296 const DominatorTree &DT,
1297 const PostDominatorTree &PDT,
1298 const LoopInfo &LI) {
1299 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1300 // alloca addresses using that. Unfortunately, offsets are not known yet
1301 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1302 // temp, shift-OR it into each alloca address and xor with the retag mask.
1303 // This generates one extra instruction per alloca use.
1304 unsigned int I = 0;
1305
1306 for (auto &KV : SInfo.AllocasToInstrument) {
1307 auto N = I++;
1308 auto *AI = KV.first;
1309 memtag::AllocaInfo &Info = KV.second;
1310 IRBuilder<> IRB(AI->getNextNode());
1311
1312 // Replace uses of the alloca with tagged address.
1313 Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
1314 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1315 Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
1316 std::string Name =
1317 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1318 Replacement->setName(Name + ".hwasan");
1319
1320 size_t Size = memtag::getAllocaSizeInBytes(*AI);
1321 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1322
1323 Value *AICast = IRB.CreatePointerCast(AI, Int8PtrTy);
1324
1325 auto HandleLifetime = [&](IntrinsicInst *II) {
1326 // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1327 // set of assumptions we need to make about the lifetime. Without this we
1328 // would need to ensure that we can track the lifetime pointer to a
1329 // constant offset from the alloca, and would still need to change the
1330 // size to include the extra alignment we use for the untagging to make
1331 // the size consistent.
1332 //
1333 // The check for standard lifetime below makes sure that we have exactly
1334 // one set of start / end in any execution (i.e. the ends are not
1335 // reachable from each other), so this will not cause any problems.
1336 II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1337 II->setArgOperand(1, AICast);
1338 };
1339 llvm::for_each(Info.LifetimeStart, HandleLifetime);
1340 llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1341
1342 AI->replaceUsesWithIf(Replacement, [AICast, AILong](Use &U) {
1343 auto *User = U.getUser();
1344 return User != AILong && User != AICast && !isLifetimeIntrinsic(User);
1345 });
1346
1347 for (auto *DDI : Info.DbgVariableIntrinsics) {
1348 // Prepend "tag_offset, N" to the dwarf expression.
1349 // Tag offset logically applies to the alloca pointer, and it makes sense
1350 // to put it at the beginning of the expression.
1351 SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
1352 retagMask(N)};
1353 for (size_t LocNo = 0; LocNo < DDI->getNumVariableLocationOps(); ++LocNo)
1354 if (DDI->getVariableLocationOp(LocNo) == AI)
1355 DDI->setExpression(DIExpression::appendOpsToArg(DDI->getExpression(),
1356 NewOps, LocNo));
1357 }
1358
1359 auto TagEnd = [&](Instruction *Node) {
1360 IRB.SetInsertPoint(Node);
1361 Value *UARTag = getUARTag(IRB, StackTag);
1362 // When untagging, use the `AlignedSize` because we need to set the tags
1363 // for the entire alloca to zero. If we used `Size` here, we would
1364 // keep the last granule tagged, and store zero in the last byte of the
1365 // last granule, due to how short granules are implemented.
1366 tagAlloca(IRB, AI, UARTag, AlignedSize);
1367 };
1368 // Calls to functions that may return twice (e.g. setjmp) confuse the
1369 // postdominator analysis, and will leave us to keep memory tagged after
1370 // function return. Work around this by always untagging at every return
1371 // statement if return_twice functions are called.
1372 bool StandardLifetime =
1373 SInfo.UnrecognizedLifetimes.empty() &&
1374 memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1375 &LI, ClMaxLifetimes) &&
1376 !SInfo.CallsReturnTwice;
1377 if (DetectUseAfterScope && StandardLifetime) {
1378 IntrinsicInst *Start = Info.LifetimeStart[0];
1379 IRB.SetInsertPoint(Start->getNextNode());
1380 tagAlloca(IRB, AI, Tag, Size);
1381 if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1382 SInfo.RetVec, TagEnd)) {
1383 for (auto *End : Info.LifetimeEnd)
1384 End->eraseFromParent();
1385 }
1386 } else {
1387 tagAlloca(IRB, AI, Tag, Size);
1388 for (auto *RI : SInfo.RetVec)
1389 TagEnd(RI);
1390 // We inserted tagging outside of the lifetimes, so we have to remove
1391 // them.
1392 for (auto &II : Info.LifetimeStart)
1393 II->eraseFromParent();
1394 for (auto &II : Info.LifetimeEnd)
1395 II->eraseFromParent();
1396 }
1397 memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1398 }
1399 for (auto &I : SInfo.UnrecognizedLifetimes)
1400 I->eraseFromParent();
1401 return true;
1402}
1403
1404void HWAddressSanitizer::sanitizeFunction(Function &F,
1405 FunctionAnalysisManager &FAM) {
1406 if (&F == HwasanCtorFunction)
1407 return;
1408
1409 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1410 return;
1411
1412 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1413
1414 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1415 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1416 SmallVector<Instruction *, 8> LandingPadVec;
1417
1418 memtag::StackInfoBuilder SIB(SSI);
1419 for (auto &Inst : instructions(F)) {
1420 if (InstrumentStack) {
1421 SIB.visit(Inst);
1422 }
1423
1424 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1425 LandingPadVec.push_back(&Inst);
1426
1427 getInterestingMemoryOperands(&Inst, OperandsToInstrument);
1428
1429 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1430 if (!ignoreMemIntrinsic(MI))
1431 IntrinToInstrument.push_back(MI);
1432 }
1433
1434 memtag::StackInfo &SInfo = SIB.get();
1435
1436 initializeCallbacks(*F.getParent());
1437
1438 if (!LandingPadVec.empty())
1439 instrumentLandingPads(LandingPadVec);
1440
1441 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1442 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1443 // __hwasan_personality_thunk is a no-op for functions without an
1444 // instrumented stack, so we can drop it.
1445 F.setPersonalityFn(nullptr);
1446 }
1447
1448 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1449 IntrinToInstrument.empty())
1450 return;
1451
1452 assert(!ShadowBase);
1453
1454 Instruction *InsertPt = &*F.getEntryBlock().begin();
1455 IRBuilder<> EntryIRB(InsertPt);
1456 emitPrologue(EntryIRB,
1457 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1458 Mapping.WithFrameRecord &&
1459 !SInfo.AllocasToInstrument.empty());
1460
1461 if (!SInfo.AllocasToInstrument.empty()) {
1462 const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
1463 const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
1464 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1465 Value *StackTag =
1466 ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
1467 instrumentStack(SInfo, StackTag, DT, PDT, LI);
1468 }
1469
1470 // If we split the entry block, move any allocas that were originally in the
1471 // entry block back into the entry block so that they aren't treated as
1472 // dynamic allocas.
1473 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1474 InsertPt = &*F.getEntryBlock().begin();
1475 for (Instruction &I :
1476 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1477 if (auto *AI = dyn_cast<AllocaInst>(&I))
1478 if (isa<ConstantInt>(AI->getArraySize()))
1479 I.moveBefore(InsertPt);
1480 }
1481 }
1482
1483 for (auto &Operand : OperandsToInstrument)
1484 instrumentMemAccess(Operand);
1485
1486 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1487 for (auto *Inst : IntrinToInstrument)
1488 instrumentMemIntrinsic(Inst);
1489 }
1490
1491 ShadowBase = nullptr;
1492 StackBaseTag = nullptr;
1493 CachedSP = nullptr;
1494}
1495
1496void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1497 assert(!UsePageAliases);
1498 Constant *Initializer = GV->getInitializer();
1499 uint64_t SizeInBytes =
1500 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1501 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1502 if (SizeInBytes != NewSize) {
1503 // Pad the initializer out to the next multiple of 16 bytes and add the
1504 // required short granule tag.
1505 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1506 Init.back() = Tag;
1507 Constant *Padding = ConstantDataArray::get(*C, Init);
1508 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1509 }
1510
1511 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1512 GlobalValue::ExternalLinkage, Initializer,
1513 GV->getName() + ".hwasan");
1514 NewGV->copyAttributesFrom(GV);
1515 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1516 NewGV->copyMetadata(GV, 0);
1517 NewGV->setAlignment(
1518 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1519
1520 // It is invalid to ICF two globals that have different tags. In the case
1521 // where the size of the global is a multiple of the tag granularity the
1522 // contents of the globals may be the same but the tags (i.e. symbol values)
1523 // may be different, and the symbols are not considered during ICF. In the
1524 // case where the size is not a multiple of the granularity, the short granule
1525 // tags would discriminate two globals with different tags, but there would
1526 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1527 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1528 // granule tag in the last byte.
1529 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1530
1531 // Descriptor format (assuming little-endian):
1532 // bytes 0-3: relative address of global
1533 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1534 // it isn't, we create multiple descriptors)
1535 // byte 7: tag
1536 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1537 const uint64_t MaxDescriptorSize = 0xfffff0;
1538 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1539 DescriptorPos += MaxDescriptorSize) {
1540 auto *Descriptor =
1541 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1542 nullptr, GV->getName() + ".hwasan.descriptor");
1543 auto *GVRelPtr = ConstantExpr::getTrunc(
1544 ConstantExpr::getAdd(
1545 ConstantExpr::getSub(
1546 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1547 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1548 ConstantInt::get(Int64Ty, DescriptorPos)),
1549 Int32Ty);
1550 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1551 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1552 Descriptor->setComdat(NewGV->getComdat());
1553 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1554 Descriptor->setSection("hwasan_globals");
1555 Descriptor->setMetadata(LLVMContext::MD_associated,
1556 MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1557 appendToCompilerUsed(M, Descriptor);
1558 }
1559
1560 Constant *Aliasee = ConstantExpr::getIntToPtr(
1561 ConstantExpr::getAdd(
1562 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1563 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1564 GV->getType());
1565 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1566 GV->getLinkage(), "", Aliasee, &M);
1567 Alias->setVisibility(GV->getVisibility());
1568 Alias->takeName(GV);
1569 GV->replaceAllUsesWith(Alias);
1570 GV->eraseFromParent();
1571}
1572
1573void HWAddressSanitizer::instrumentGlobals() {
1574 std::vector<GlobalVariable *> Globals;
1575 for (GlobalVariable &GV : M.globals()) {
1576 if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
1577 continue;
1578
1579 if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
1580 GV.isThreadLocal())
1581 continue;
1582
1583 // Common symbols can't have aliases point to them, so they can't be tagged.
1584 if (GV.hasCommonLinkage())
1585 continue;
1586
1587 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1588 // which would be broken both by adding tags and potentially by the extra
1589 // padding/alignment that we insert.
1590 if (GV.hasSection())
1591 continue;
1592
1593 Globals.push_back(&GV);
1594 }
1595
1596 MD5 Hasher;
1597 Hasher.update(M.getSourceFileName());
1598 MD5::MD5Result Hash;
1599 Hasher.final(Hash);
1600 uint8_t Tag = Hash[0];
1601
1602 for (GlobalVariable *GV : Globals) {
1603 Tag &= TagMaskByte;
1604 // Skip tag 0 in order to avoid collisions with untagged memory.
1605 if (Tag == 0)
1606 Tag = 1;
1607 instrumentGlobal(GV, Tag++);
1608 }
1609}
1610
1611void HWAddressSanitizer::instrumentPersonalityFunctions() {
1612 // We need to untag stack frames as we unwind past them. That is the job of
1613 // the personality function wrapper, which either wraps an existing
1614 // personality function or acts as a personality function on its own. Each
1615 // function that has a personality function or that can be unwound past has
1616 // its personality function changed to a thunk that calls the personality
1617 // function wrapper in the runtime.
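// Roughly, for a function whose personality is e.g. __gxx_personality_v0
// (the name here is purely illustrative), the emitted thunk behaves like:
//
//   int __hwasan_personality_thunk.__gxx_personality_v0(
//       int version, int actions, uint64_t exceptionClass,
//       _Unwind_Exception *eo, _Unwind_Context *ctx) {
//     return __hwasan_personality_wrapper(version, actions, exceptionClass,
//                                         eo, ctx, &__gxx_personality_v0,
//                                         &_Unwind_GetGR, &_Unwind_GetCFA);
//   }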
1618 MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1619 for (Function &F : M) {
1620 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1621 continue;
1622
1623 if (F.hasPersonalityFn()) {
1624 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1625 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1626 PersonalityFns[nullptr].push_back(&F);
1627 }
1628 }
1629
1630 if (PersonalityFns.empty())
1631 return;
1632
1633 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1634 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
1635 Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
1636 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1637 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1638
1639 for (auto &P : PersonalityFns) {
1640 std::string ThunkName = kHwasanPersonalityThunkName;
1641 if (P.first)
1642 ThunkName += ("." + P.first->getName()).str();
1643 FunctionType *ThunkFnTy = FunctionType::get(
1644 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
1645 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1646 cast<GlobalValue>(P.first)->hasLocalLinkage());
1647 auto *ThunkFn = Function::Create(ThunkFnTy,
1650 ThunkName, &M);
1651 if (!IsLocal) {
1652 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1653 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1654 }
1655
1656 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1657 IRBuilder<> IRB(BB);
1658 CallInst *WrapperCall = IRB.CreateCall(
1659 HwasanPersonalityWrapper,
1660 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1661 ThunkFn->getArg(3), ThunkFn->getArg(4),
1662 P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
1663 : Constant::getNullValue(Int8PtrTy),
1664 IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
1665 IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
1666 WrapperCall->setTailCall();
1667 IRB.CreateRet(WrapperCall);
1668
1669 for (Function *F : P.second)
1670 F->setPersonalityFn(ThunkFn);
1671 }
1672}
1673
1674void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1675 bool InstrumentWithCalls) {
1676 Scale = kDefaultShadowScale;
1677 if (TargetTriple.isOSFuchsia()) {
1678 // Fuchsia is always PIE, which means that the beginning of the address
1679 // space is always available.
1680 InGlobal = false;
1681 InTls = false;
1682 Offset = 0;
1683 WithFrameRecord = true;
1684 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1685 InGlobal = false;
1686 InTls = false;
1687 Offset = ClMappingOffset;
1688 WithFrameRecord = false;
1689 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1690 InGlobal = false;
1691 InTls = false;
1692 Offset = 0;
1693 WithFrameRecord = false;
1694 } else if (ClWithIfunc) {
1695 InGlobal = true;
1696 InTls = false;
1697 Offset = kDynamicShadowSentinel;
1698 WithFrameRecord = false;
1699 } else if (ClWithTls) {
1700 InGlobal = false;
1701 InTls = true;
1702 Offset = kDynamicShadowSentinel;
1703 WithFrameRecord = true;
1704 } else {
1705 InGlobal = false;
1706 InTls = false;
1707 Offset = kDynamicShadowSentinel;
1708 WithFrameRecord = false;
1709 }
1710}
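// Whatever configuration is chosen above, the shadow lookup itself has the
// same shape (a sketch, assuming the default Scale of 4): one shadow byte
// describes a 16-byte granule, so for an untagged address A the pass emits
//   ShadowByte = *(ShadowBase + (A >> 4))
// where ShadowBase is the constant Offset when one is given, or a base
// resolved at run time (ifunc, TLS slot, or the
// __hwasan_shadow_memory_dynamic_address global) when Offset is
// kDynamicShadowSentinel.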