1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/StringRef.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/Dominators.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/IRBuilder.h"
38#include "llvm/IR/InlineAsm.h"
40#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/LLVMContext.h"
45#include "llvm/IR/MDBuilder.h"
46#include "llvm/IR/Module.h"
47#include "llvm/IR/Type.h"
48#include "llvm/IR/Value.h"
51#include "llvm/Support/Debug.h"
60#include <optional>
61
62using namespace llvm;
63
64#define DEBUG_TYPE "hwasan"
65
66const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
67const char kHwasanNoteName[] = "hwasan.note";
68const char kHwasanInitName[] = "__hwasan_init";
69const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
70
72 "__hwasan_shadow_memory_dynamic_address";
73
74// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
75static const size_t kNumberOfAccessSizes = 5;
76
static const size_t kDefaultShadowScale = 4;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
80
81static const unsigned kShadowBaseAlignment = 32;
82
static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
85 cl::desc("Prefix for memory access callbacks"),
86 cl::Hidden, cl::init("__hwasan_"));
87
89 "hwasan-kernel-mem-intrinsic-prefix",
90 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
91 cl::init(false));
92
94 "hwasan-instrument-with-calls",
95 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
96 cl::init(false));
97
98static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
99 cl::desc("instrument read instructions"),
100 cl::Hidden, cl::init(true));
101
102static cl::opt<bool>
103 ClInstrumentWrites("hwasan-instrument-writes",
104 cl::desc("instrument write instructions"), cl::Hidden,
105 cl::init(true));
106
108 "hwasan-instrument-atomics",
109 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
110 cl::init(true));
111
112static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
113 cl::desc("instrument byval arguments"),
114 cl::Hidden, cl::init(true));
115
116static cl::opt<bool>
117 ClRecover("hwasan-recover",
118 cl::desc("Enable recovery mode (continue-after-error)."),
119 cl::Hidden, cl::init(false));
120
121static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
122 cl::desc("instrument stack (allocas)"),
123 cl::Hidden, cl::init(true));
124
125static cl::opt<bool>
126 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
127 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);
131 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
133 cl::desc("How many lifetime ends to handle for a single alloca."),
135
136static cl::opt<bool>
137 ClUseAfterScope("hwasan-use-after-scope",
138 cl::desc("detect use after scope within function"),
139 cl::Hidden, cl::init(true));
140
142 "hwasan-generate-tags-with-calls",
143 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
144 cl::init(false));
145
146static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
147 cl::Hidden, cl::init(false));
148
150 "hwasan-match-all-tag",
151 cl::desc("don't report bad accesses via pointers with this tag"),
152 cl::Hidden, cl::init(-1));
153
154static cl::opt<bool>
155 ClEnableKhwasan("hwasan-kernel",
156 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
157 cl::Hidden, cl::init(false));
158
159// These flags allow to change the shadow mapping and control how shadow memory
160// is accessed. The shadow mapping looks like:
161// Shadow = (Mem >> scale) + offset
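// Illustrative example (not part of the original source): with the default
// Scale of 4 and an Offset of 0, an access to address 0x1008 is checked
// against the shadow byte at (0x1008 >> 4) + 0 = 0x100, so one shadow byte
// covers a single 16-byte granule.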
162
static cl::opt<uint64_t>
    ClMappingOffset("hwasan-mapping-offset",
165 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
166 cl::Hidden, cl::init(0));
167
168static cl::opt<bool>
169 ClWithIfunc("hwasan-with-ifunc",
170 cl::desc("Access dynamic shadow through an ifunc global on "
171 "platforms that support this"),
172 cl::Hidden, cl::init(false));
173
175 "hwasan-with-tls",
176 cl::desc("Access dynamic shadow through an thread-local pointer on "
177 "platforms that support this"),
178 cl::Hidden, cl::init(true));
179
180// Mode for selecting how to insert frame record info into the stack ring
181// buffer.
enum RecordStackHistoryMode {
  // Do not record frame record info.
  none,

  // Insert instructions into the prologue for storing into the stack ring
  // buffer directly.
  instr,

  // Add a call to __hwasan_add_frame_record in the runtime.
  libcall,
};
193
195 "hwasan-record-stack-history",
196 cl::desc("Record stack frames with tagged allocations in a thread-local "
197 "ring buffer"),
198 cl::values(clEnumVal(none, "Do not record stack ring history"),
199 clEnumVal(instr, "Insert instructions into the prologue for "
200 "storing into the stack ring buffer directly"),
201 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
202 "storing into the stack ring buffer")),
    cl::Hidden, cl::init(instr));
205static cl::opt<bool>
206 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
207 cl::desc("instrument memory intrinsics"),
208 cl::Hidden, cl::init(true));
209
210static cl::opt<bool>
211 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
212 cl::desc("instrument landing pads"), cl::Hidden,
213 cl::init(false));
214
216 "hwasan-use-short-granules",
217 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
218 cl::init(false));
219
221 "hwasan-instrument-personality-functions",
222 cl::desc("instrument personality functions"), cl::Hidden);
223
224static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
225 cl::desc("inline all checks"),
226 cl::Hidden, cl::init(false));
227
228static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
229 cl::desc("inline all checks"),
230 cl::Hidden, cl::init(false));
231
232// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
233static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
234 cl::desc("Use page aliasing in HWASan"),
235 cl::Hidden, cl::init(false));
236
237namespace {
238
239bool shouldUsePageAliases(const Triple &TargetTriple) {
240 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
241}
242
243bool shouldInstrumentStack(const Triple &TargetTriple) {
244 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
245}
246
247bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
  return ClInstrumentWithCalls.getNumOccurrences()
             ? ClInstrumentWithCalls
             : TargetTriple.getArch() == Triple::x86_64;
251}
252
253bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
254 return ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
255 : !DisableOptimization;
256}
257
258bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
259 bool DisableOptimization) {
260 return shouldInstrumentStack(TargetTriple) &&
261 mightUseStackSafetyAnalysis(DisableOptimization);
262}
263
264bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
265 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
266}
267
268/// An instrumentation pass implementing detection of addressability bugs
269/// using tagged pointers.
270class HWAddressSanitizer {
271public:
272 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
273 const StackSafetyGlobalInfo *SSI)
274 : M(M), SSI(SSI) {
275 this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
                              ? ClEnableKhwasan
                              : CompileKernel;
279
280 initializeModule();
281 }
282
283 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
284
285private:
286 struct ShadowTagCheckInfo {
287 Instruction *TagMismatchTerm = nullptr;
288 Value *PtrLong = nullptr;
289 Value *AddrLong = nullptr;
290 Value *PtrTag = nullptr;
291 Value *MemTag = nullptr;
292 };
293 void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
294
295 void initializeModule();
296 void createHwasanCtorComdat();
297
298 void initializeCallbacks(Module &M);
299
300 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
301
302 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
303 Value *getShadowNonTls(IRBuilder<> &IRB);
304
305 void untagPointerOperand(Instruction *I, Value *Addr);
306 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
307
308 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
309 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
310 DomTreeUpdater &DTU, LoopInfo *LI);
311 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
312 unsigned AccessSizeIndex,
313 Instruction *InsertBefore,
314 DomTreeUpdater &DTU, LoopInfo *LI);
315 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
316 unsigned AccessSizeIndex,
317 Instruction *InsertBefore, DomTreeUpdater &DTU,
318 LoopInfo *LI);
319 bool ignoreMemIntrinsic(MemIntrinsic *MI);
320 void instrumentMemIntrinsic(MemIntrinsic *MI);
321 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
322 LoopInfo *LI);
323 bool ignoreAccess(Instruction *Inst, Value *Ptr);
324 void getInterestingMemoryOperands(
325 Instruction *I, const TargetLibraryInfo &TLI,
      SmallVectorImpl<InterestingMemoryOperand> &Interesting);
328 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
329 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
330 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
331 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
332 const DominatorTree &DT, const PostDominatorTree &PDT,
333 const LoopInfo &LI);
334 Value *readRegister(IRBuilder<> &IRB, StringRef Name);
335 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
336 Value *getNextTagWithCall(IRBuilder<> &IRB);
337 Value *getStackBaseTag(IRBuilder<> &IRB);
338 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
339 Value *getUARTag(IRBuilder<> &IRB);
340
341 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
342 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
343 unsigned retagMask(unsigned AllocaNo);
344
345 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
346
347 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
348 void instrumentGlobals();
349
350 Value *getPC(IRBuilder<> &IRB);
351 Value *getSP(IRBuilder<> &IRB);
352 Value *getFrameRecordInfo(IRBuilder<> &IRB);
353
354 void instrumentPersonalityFunctions();
355
356 LLVMContext *C;
357 Module &M;
358 const StackSafetyGlobalInfo *SSI;
359 Triple TargetTriple;
360
361 /// This struct defines the shadow mapping using the rule:
362 /// shadow = (mem >> Scale) + Offset.
363 /// If InGlobal is true, then
364 /// extern char __hwasan_shadow[];
365 /// shadow = (mem >> Scale) + &__hwasan_shadow
366 /// If InTls is true, then
367 /// extern char *__hwasan_tls;
368 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
369 ///
370 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
371 /// ring buffer for storing stack allocations on targets that support it.
372 struct ShadowMapping {
373 uint8_t Scale;
    uint64_t Offset;
    bool InGlobal;
376 bool InTls;
377 bool WithFrameRecord;
378
379 void init(Triple &TargetTriple, bool InstrumentWithCalls);
380 Align getObjectAlignment() const { return Align(1ULL << Scale); }
381 };
382
383 ShadowMapping Mapping;
384
385 Type *VoidTy = Type::getVoidTy(M.getContext());
386 Type *IntptrTy;
387 PointerType *PtrTy;
388 Type *Int8Ty;
389 Type *Int32Ty;
390 Type *Int64Ty = Type::getInt64Ty(M.getContext());
391
392 bool CompileKernel;
393 bool Recover;
394 bool OutlinedChecks;
395 bool InlineFastPath;
396 bool UseShortGranules;
397 bool InstrumentLandingPads;
398 bool InstrumentWithCalls;
399 bool InstrumentStack;
400 bool DetectUseAfterScope;
401 bool UsePageAliases;
402 bool UseMatchAllCallback;
403
404 std::optional<uint8_t> MatchAllTag;
405
406 unsigned PointerTagShift;
407 uint64_t TagMaskByte;
408
409 Function *HwasanCtorFunction;
410
411 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
412 FunctionCallee HwasanMemoryAccessCallbackSized[2];
413
414 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
415 FunctionCallee HwasanHandleVfork;
416
417 FunctionCallee HwasanTagMemoryFunc;
418 FunctionCallee HwasanGenerateTagFunc;
419 FunctionCallee HwasanRecordFrameRecordFunc;
420
421 Constant *ShadowGlobal;
422
423 Value *ShadowBase = nullptr;
424 Value *StackBaseTag = nullptr;
425 Value *CachedSP = nullptr;
426 GlobalValue *ThreadPtrGlobal = nullptr;
427};
428
429} // end anonymous namespace
430
PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
                                              ModuleAnalysisManager &MAM) {
  const StackSafetyGlobalInfo *SSI = nullptr;
434 auto TargetTriple = llvm::Triple(M.getTargetTriple());
435 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
    SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
438 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
439 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
440 for (Function &F : M)
441 HWASan.sanitizeFunction(F, FAM);

  PreservedAnalyses PA = PreservedAnalyses::none();
  // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
  // are incrementally updated throughout this pass whenever
  // SplitBlockAndInsertIfThen is called.
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
451 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
452 // make changes that require GlobalsAA to be invalidated.
453 PA.abandon<GlobalsAA>();
454 return PA;
455}
void HWAddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
460 OS << '<';
461 if (Options.CompileKernel)
462 OS << "kernel;";
463 if (Options.Recover)
464 OS << "recover";
465 OS << '>';
466}
467
468void HWAddressSanitizer::createHwasanCtorComdat() {
469 std::tie(HwasanCtorFunction, std::ignore) =
      getOrCreateSanitizerCtorAndInitFunctions(
          M, kHwasanModuleCtorName, kHwasanInitName,
          /*InitArgTypes=*/{},
473 /*InitArgs=*/{},
474 // This callback is invoked when the functions are created the first
475 // time. Hook them into the global ctors list in that case:
476 [&](Function *Ctor, FunctionCallee) {
477 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
478 Ctor->setComdat(CtorComdat);
479 appendToGlobalCtors(M, Ctor, 0, Ctor);
480 });
481
482 // Create a note that contains pointers to the list of global
483 // descriptors. Adding a note to the output file will cause the linker to
484 // create a PT_NOTE program header pointing to the note that we can use to
485 // find the descriptor list starting from the program headers. A function
486 // provided by the runtime initializes the shadow memory for the globals by
487 // accessing the descriptor list via the note. The dynamic loader needs to
488 // call this function whenever a library is loaded.
489 //
490 // The reason why we use a note for this instead of a more conventional
491 // approach of having a global constructor pass a descriptor list pointer to
492 // the runtime is because of an order of initialization problem. With
493 // constructors we can encounter the following problematic scenario:
494 //
495 // 1) library A depends on library B and also interposes one of B's symbols
496 // 2) B's constructors are called before A's (as required for correctness)
497 // 3) during construction, B accesses one of its "own" globals (actually
498 // interposed by A) and triggers a HWASAN failure due to the initialization
499 // for A not having happened yet
500 //
501 // Even without interposition it is possible to run into similar situations in
502 // cases where two libraries mutually depend on each other.
503 //
504 // We only need one note per binary, so put everything for the note in a
505 // comdat. This needs to be a comdat with an .init_array section to prevent
506 // newer versions of lld from discarding the note.
507 //
508 // Create the note even if we aren't instrumenting globals. This ensures that
509 // binaries linked from object files with both instrumented and
510 // non-instrumented globals will end up with a note, even if a comdat from an
511 // object file with non-instrumented globals is selected. The note is harmless
512 // if the runtime doesn't support it, since it will just be ignored.
513 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
514
515 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
516 auto *Start =
517 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
518 nullptr, "__start_hwasan_globals");
519 Start->setVisibility(GlobalValue::HiddenVisibility);
520 auto *Stop =
521 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
522 nullptr, "__stop_hwasan_globals");
523 Stop->setVisibility(GlobalValue::HiddenVisibility);
524
525 // Null-terminated so actually 8 bytes, which are required in order to align
526 // the note properly.
527 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
528
  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
                                 Int32Ty, Int32Ty);
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
  Note->setSection(".note.hwasan.globals");
535 Note->setComdat(NoteComdat);
536 Note->setAlignment(Align(4));
537
538 // The pointers in the note need to be relative so that the note ends up being
539 // placed in rodata, which is the standard location for notes.
  auto CreateRelPtr = [&](Constant *Ptr) {
    return ConstantExpr::getTrunc(
        ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
                             ConstantExpr::getPtrToInt(Note, Int64Ty)),
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8),                           // n_namesz
       ConstantInt::get(Int32Ty, 8),                           // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
  appendToCompilerUsed(M, Note);

553 // Create a zero-length global in hwasan_globals so that the linker will
554 // always create start and stop symbols.
555 auto *Dummy = new GlobalVariable(
556 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
557 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
558 Dummy->setSection("hwasan_globals");
559 Dummy->setComdat(NoteComdat);
  Dummy->setMetadata(LLVMContext::MD_associated,
                     MDNode::get(*C, ValueAsMetadata::get(Note)));
562 appendToCompilerUsed(M, Dummy);
563}
564
565/// Module-level initialization.
566///
567/// inserts a call to __hwasan_init to the module's constructor list.
568void HWAddressSanitizer::initializeModule() {
569 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
570 auto &DL = M.getDataLayout();
571
572 TargetTriple = Triple(M.getTargetTriple());
573
574 // x86_64 currently has two modes:
575 // - Intel LAM (default)
576 // - pointer aliasing (heap only)
577 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
578 UsePageAliases = shouldUsePageAliases(TargetTriple);
579 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
580 InstrumentStack = shouldInstrumentStack(TargetTriple);
581 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
582 PointerTagShift = IsX86_64 ? 57 : 56;
583 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
584
585 Mapping.init(TargetTriple, InstrumentWithCalls);
586
587 C = &(M.getContext());
588 IRBuilder<> IRB(*C);
589 IntptrTy = IRB.getIntPtrTy(DL);
590 PtrTy = IRB.getPtrTy();
591 Int8Ty = IRB.getInt8Ty();
592 Int32Ty = IRB.getInt32Ty();
593
594 HwasanCtorFunction = nullptr;
595
596 // Older versions of Android do not have the required runtime support for
597 // short granules, global or personality function instrumentation. On other
598 // platforms we currently require using the latest version of the runtime.
599 bool NewRuntime =
600 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
601
602 UseShortGranules =
603 ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
604 OutlinedChecks =
605 (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
606 TargetTriple.isOSBinFormatELF() &&
607 (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
608
609 InlineFastPath =
      (ClInlineFastPathChecks.getNumOccurrences()
           ? ClInlineFastPathChecks
           : !(TargetTriple.isAndroid() ||
613 TargetTriple.isOSFuchsia())); // These platforms may prefer less
614 // inlining to reduce binary size.
615
616 if (ClMatchAllTag.getNumOccurrences()) {
617 if (ClMatchAllTag != -1) {
618 MatchAllTag = ClMatchAllTag & 0xFF;
619 }
620 } else if (CompileKernel) {
621 MatchAllTag = 0xFF;
622 }
623 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
624
625 // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
                              ? ClInstrumentLandingPads
                              : !NewRuntime;
629
630 if (!CompileKernel) {
631 createHwasanCtorComdat();
632 bool InstrumentGlobals =
633 ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
634
635 if (InstrumentGlobals && !UsePageAliases)
636 instrumentGlobals();
637
638 bool InstrumentPersonalityFunctions =
        ClInstrumentPersonalityFunctions.getNumOccurrences()
            ? ClInstrumentPersonalityFunctions
            : NewRuntime;
642 if (InstrumentPersonalityFunctions)
643 instrumentPersonalityFunctions();
644 }
645
646 if (!TargetTriple.isAndroid()) {
647 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
648 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
650 "__hwasan_tls", nullptr,
653 return GV;
654 });
655 ThreadPtrGlobal = cast<GlobalVariable>(C);
656 }
657}
658
659void HWAddressSanitizer::initializeCallbacks(Module &M) {
660 IRBuilder<> IRB(*C);
661 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
662 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
663 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
664 *HwasanMemsetFnTy;
665 if (UseMatchAllCallback) {
666 HwasanMemoryAccessCallbackSizedFnTy =
667 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
668 HwasanMemoryAccessCallbackFnTy =
669 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
670 HwasanMemTransferFnTy =
671 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
672 HwasanMemsetFnTy =
673 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
674 } else {
675 HwasanMemoryAccessCallbackSizedFnTy =
676 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
677 HwasanMemoryAccessCallbackFnTy =
678 FunctionType::get(VoidTy, {IntptrTy}, false);
679 HwasanMemTransferFnTy =
680 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
681 HwasanMemsetFnTy =
682 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
683 }
684
685 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
686 const std::string TypeStr = AccessIsWrite ? "store" : "load";
687 const std::string EndingStr = Recover ? "_noabort" : "";
688
689 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
690 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
691 HwasanMemoryAccessCallbackSizedFnTy);
692
693 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
694 AccessSizeIndex++) {
695 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
696 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
697 itostr(1ULL << AccessSizeIndex) +
698 MatchAllStr + EndingStr,
699 HwasanMemoryAccessCallbackFnTy);
700 }
701 }
702
703 const std::string MemIntrinCallbackPrefix =
704 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
705 ? std::string("")
          : ClMemoryAccessCallbackPrefix;
708 HwasanMemmove = M.getOrInsertFunction(
709 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
710 HwasanMemcpy = M.getOrInsertFunction(
711 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
712 HwasanMemset = M.getOrInsertFunction(
713 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
714
715 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
716 PtrTy, Int8Ty, IntptrTy);
717 HwasanGenerateTagFunc =
718 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
719
720 HwasanRecordFrameRecordFunc =
721 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
722
723 ShadowGlobal =
724 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
725
726 HwasanHandleVfork =
727 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
728}
729
730Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
731 // An empty inline asm with input reg == output reg.
732 // An opaque no-op cast, basically.
733 // This prevents code bloat as a result of rematerializing trivial definitions
734 // such as constants or global addresses at every load and store.
735 InlineAsm *Asm =
736 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
737 StringRef(""), StringRef("=r,0"),
738 /*hasSideEffects=*/false);
739 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
740}
741
742Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
743 return getOpaqueNoopCast(IRB, ShadowGlobal);
744}
745
746Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
747 if (Mapping.Offset != kDynamicShadowSentinel)
748 return getOpaqueNoopCast(
        IRB, ConstantExpr::getIntToPtr(
                 ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));
751
752 if (Mapping.InGlobal)
753 return getDynamicShadowIfunc(IRB);
754
755 Value *GlobalDynamicAddress =
      IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
          kHwasanShadowMemoryDynamicAddress, PtrTy);
  return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
759}
760
761bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
762 // Do not instrument accesses from different address spaces; we cannot deal
763 // with them.
764 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
765 if (PtrTy->getPointerAddressSpace() != 0)
766 return true;
767
768 // Ignore swifterror addresses.
769 // swifterror memory addresses are mem2reg promoted by instruction
770 // selection. As such they cannot have regular uses like an instrumentation
771 // function and it makes no sense to track them as memory.
772 if (Ptr->isSwiftError())
773 return true;
774
775 if (findAllocaForValue(Ptr)) {
776 if (!InstrumentStack)
777 return true;
778 if (SSI && SSI->stackAccessIsSafe(*Inst))
779 return true;
780 }
781 return false;
782}
783
784void HWAddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, const TargetLibraryInfo &TLI,
    SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
787 // Skip memory accesses inserted by another instrumentation.
788 if (I->hasMetadata(LLVMContext::MD_nosanitize))
789 return;
790
791 // Do not instrument the load fetching the dynamic shadow address.
792 if (ShadowBase == I)
793 return;
794
795 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
796 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
797 return;
798 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
799 LI->getType(), LI->getAlign());
800 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
801 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
802 return;
803 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
804 SI->getValueOperand()->getType(), SI->getAlign());
805 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
806 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
807 return;
808 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
809 RMW->getValOperand()->getType(), std::nullopt);
810 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
811 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
812 return;
813 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
814 XCHG->getCompareOperand()->getType(),
815 std::nullopt);
816 } else if (auto *CI = dyn_cast<CallInst>(I)) {
817 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
818 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
819 ignoreAccess(I, CI->getArgOperand(ArgNo)))
820 continue;
821 Type *Ty = CI->getParamByValType(ArgNo);
822 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
823 }
825 }
826}
827
static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
830 return LI->getPointerOperandIndex();
831 if (StoreInst *SI = dyn_cast<StoreInst>(I))
832 return SI->getPointerOperandIndex();
833 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
834 return RMW->getPointerOperandIndex();
835 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
836 return XCHG->getPointerOperandIndex();
837 report_fatal_error("Unexpected instruction");
838 return -1;
839}
840
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
845}
846
847void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
848 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
849 TargetTriple.isRISCV64())
850 return;
851
852 IRBuilder<> IRB(I);
853 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
854 Value *UntaggedPtr =
855 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
856 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
857}
858
859Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
860 // Mem >> Scale
861 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
862 if (Mapping.Offset == 0)
863 return IRB.CreateIntToPtr(Shadow, PtrTy);
864 // (Mem >> Scale) + Offset
865 return IRB.CreatePtrAdd(ShadowBase, Shadow);
866}
867
868int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
869 unsigned AccessSizeIndex) {
870 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
871 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
872 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
873 (Recover << HWASanAccessInfo::RecoverShift) |
874 (IsWrite << HWASanAccessInfo::IsWriteShift) |
875 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
876}
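// Rough sketch of the encoding (field positions come from HWASanAccessInfo and
// are not restated here): a recoverable 4-byte write with no match-all tag
// sets the Recover and IsWrite bits and stores AccessSizeIndex = 2 in the
// access-size field; the runtime decodes the same packed constant when an
// outlined or inline check traps.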
877
878HWAddressSanitizer::ShadowTagCheckInfo
879HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
880 DomTreeUpdater &DTU, LoopInfo *LI) {
881 ShadowTagCheckInfo R;
882
883 IRBuilder<> IRB(InsertBefore);
884
885 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
886 R.PtrTag =
887 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
888 R.AddrLong = untagPointer(IRB, R.PtrLong);
889 Value *Shadow = memToShadow(R.AddrLong, IRB);
890 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
891 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
892
893 if (MatchAllTag.has_value()) {
894 Value *TagNotIgnored = IRB.CreateICmpNE(
895 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
896 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
897 }
898
899 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
900 TagMismatch, InsertBefore, false,
901 MDBuilder(*C).createBranchWeights(1, 100000), &DTU, LI);
902
903 return R;
904}
905
906void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
907 unsigned AccessSizeIndex,
908 Instruction *InsertBefore,
909 DomTreeUpdater &DTU,
910 LoopInfo *LI) {
911 assert(!UsePageAliases);
912 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
913
914 if (InlineFastPath)
915 InsertBefore =
916 insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
917
918 IRBuilder<> IRB(InsertBefore);
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  IRB.CreateCall(Intrinsic::getDeclaration(
                     M, UseShortGranules
922 ? Intrinsic::hwasan_check_memaccess_shortgranules
923 : Intrinsic::hwasan_check_memaccess),
924 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
925}
926
927void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
928 unsigned AccessSizeIndex,
929 Instruction *InsertBefore,
930 DomTreeUpdater &DTU,
931 LoopInfo *LI) {
932 assert(!UsePageAliases);
933 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
934
935 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
936
937 IRBuilder<> IRB(TCI.TagMismatchTerm);
938 Value *OutOfShortGranuleTagRange =
939 IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
940 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
941 OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
942 MDBuilder(*C).createBranchWeights(1, 100000), &DTU, LI);
943
944 IRB.SetInsertPoint(TCI.TagMismatchTerm);
945 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
946 PtrLowBits = IRB.CreateAdd(
947 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
948 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
949 SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
950 MDBuilder(*C).createBranchWeights(1, 100000), &DTU,
951 LI, CheckFailTerm->getParent());
952
953 IRB.SetInsertPoint(TCI.TagMismatchTerm);
954 Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
955 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
956 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
957 Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
958 SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
959 MDBuilder(*C).createBranchWeights(1, 100000), &DTU,
960 LI, CheckFailTerm->getParent());
961
962 IRB.SetInsertPoint(CheckFailTerm);
963 InlineAsm *Asm;
964 switch (TargetTriple.getArch()) {
965 case Triple::x86_64:
966 // The signal handler will find the data address in rdi.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
969 "int3\nnopl " +
970 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
971 "(%rax)",
972 "{rdi}",
973 /*hasSideEffects=*/true);
974 break;
975 case Triple::aarch64:
977 // The signal handler will find the data address in x0.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
980 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
981 "{x0}",
982 /*hasSideEffects=*/true);
983 break;
984 case Triple::riscv64:
985 // The signal handler will find the data address in x10.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
988 "ebreak\naddiw x0, x11, " +
989 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
990 "{x10}",
991 /*hasSideEffects=*/true);
992 break;
993 default:
994 report_fatal_error("unsupported architecture");
995 }
996 IRB.CreateCall(Asm, TCI.PtrLong);
997 if (Recover)
998 cast<BranchInst>(CheckFailTerm)
999 ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
1000}
1001
1002bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
1003 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1004 return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
1005 (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
1006 }
1007 if (isa<MemSetInst>(MI))
1008 return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
1009 return false;
1010}
1011
1012void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1013 IRBuilder<> IRB(MI);
1014 if (isa<MemTransferInst>(MI)) {
    SmallVector<Value *, 4> Args{
        MI->getOperand(0), MI->getOperand(1),
1017 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1018
1019 if (UseMatchAllCallback)
1020 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1021 IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1022 } else if (isa<MemSetInst>(MI)) {
    SmallVector<Value *, 4> Args{
        MI->getOperand(0),
1025 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1026 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1027 if (UseMatchAllCallback)
1028 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1029 IRB.CreateCall(HwasanMemset, Args);
1030 }
1031 MI->eraseFromParent();
1032}
1033
1034bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1035 DomTreeUpdater &DTU,
1036 LoopInfo *LI) {
1037 Value *Addr = O.getPtr();
1038
1039 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1040
1041 if (O.MaybeMask)
1042 return false; // FIXME
1043
1044 IRBuilder<> IRB(O.getInsn());
1045 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1046 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1047 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1048 *O.Alignment >= O.TypeStoreSize / 8)) {
1049 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1050 if (InstrumentWithCalls) {
      SmallVector<Value *, 1> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
      if (UseMatchAllCallback)
1053 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1054 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1055 Args);
1056 } else if (OutlinedChecks) {
1057 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1058 DTU, LI);
1059 } else {
1060 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1061 DTU, LI);
1062 }
1063 } else {
    SmallVector<Value *, 3> Args{
        IRB.CreatePointerCast(Addr, IntptrTy),
1066 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1067 ConstantInt::get(IntptrTy, 8))};
1068 if (UseMatchAllCallback)
1069 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1070 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1071 }
1072 untagPointerOperand(O.getInsn(), Addr);
1073
1074 return true;
1075}
1076
1077void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1078 size_t Size) {
1079 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1080 if (!UseShortGranules)
1081 Size = AlignedSize;
1082
1083 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1084 if (InstrumentWithCalls) {
1085 IRB.CreateCall(HwasanTagMemoryFunc,
1086 {IRB.CreatePointerCast(AI, PtrTy), Tag,
1087 ConstantInt::get(IntptrTy, AlignedSize)});
1088 } else {
1089 size_t ShadowSize = Size >> Mapping.Scale;
1090 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1091 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1092 // If this memset is not inlined, it will be intercepted in the hwasan
1093 // runtime library. That's OK, because the interceptor skips the checks if
1094 // the address is in the shadow region.
1095 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1096 // llvm.memset right here into either a sequence of stores, or a call to
1097 // hwasan_tag_memory.
1098 if (ShadowSize)
1099 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
1100 if (Size != AlignedSize) {
1101 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1102 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1103 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1104 IRB.CreateStore(
1105 Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1106 AlignedSize - 1));
1107 }
1108 }
1109}
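// Illustrative example (not from the source): a 20-byte alloca with 16-byte
// granules gets AlignedSize = 32. The first shadow byte is set to the tag
// (ShadowSize = 20 >> 4 = 1); the second shadow byte is then given the
// short-granule size 4 (20 % 16), and the tag itself is stored in the last
// byte of the second granule (offset 31) so the runtime can still recover it.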
1110
1111unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1112 if (TargetTriple.getArch() == Triple::x86_64)
1113 return AllocaNo & TagMaskByte;
1114
1115 // A list of 8-bit numbers that have at most one run of non-zero bits.
1116 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1117 // masks.
1118 // The list does not include the value 255, which is used for UAR.
1119 //
1120 // Because we are more likely to use earlier elements of this list than later
1121 // ones, it is sorted in increasing order of probability of collision with a
1122 // mask allocated (temporally) nearby. The program that generated this list
1123 // can be found at:
1124 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1125 static const unsigned FastMasks[] = {
1126 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1127 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1128 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1129 return FastMasks[AllocaNo % std::size(FastMasks)];
1130}
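// Illustrative example (not from the source): on AArch64 the fifth alloca in a
// function (AllocaNo == 4) gets FastMasks[4] == 32, so its tag is the stack
// base tag XORed with 32 -- a single-run mask that can be folded into one
// EOR-immediate once shifted into the tag byte.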
1131
1132Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1133 if (TagMaskByte == 0xFF)
1134 return OldTag; // No need to clear the tag byte.
1135 return IRB.CreateAnd(OldTag,
1136 ConstantInt::get(OldTag->getType(), TagMaskByte));
1137}
1138
1139Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1140 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1141}
1142
1143Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return nullptr;
1146 if (StackBaseTag)
1147 return StackBaseTag;
1148 // Extract some entropy from the stack pointer for the tags.
1149 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1150 // between functions).
1151 Value *StackPointerLong = getSP(IRB);
1152 Value *StackTag =
1153 applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1154 IRB.CreateLShr(StackPointerLong, 20)));
1155 StackTag->setName("hwasan.stack.base.tag");
1156 return StackTag;
1157}
1158
1159Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1160 unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
1163 return IRB.CreateXor(
1164 StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1165}
1166
1167Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1168 Value *StackPointerLong = getSP(IRB);
1169 Value *UARTag =
1170 applyTagMask(IRB, IRB.CreateLShr(StackPointerLong, PointerTagShift));
1171
1172 UARTag->setName("hwasan.uar.tag");
1173 return UARTag;
1174}
1175
1176// Add a tag to an address.
1177Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1178 Value *PtrLong, Value *Tag) {
1179 assert(!UsePageAliases);
1180 Value *TaggedPtrLong;
1181 if (CompileKernel) {
1182 // Kernel addresses have 0xFF in the most significant byte.
1183 Value *ShiftedTag =
1184 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1185 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1186 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1187 } else {
1188 // Userspace can simply do OR (tag << PointerTagShift);
1189 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1190 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1191 }
1192 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1193}
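// Illustrative values (userspace, AArch64, not from the source): with
// PointerTagShift == 56, tagging 0x00007fff00001000 with tag 0x2a ORs in
// 0x2a00000000000000, giving 0x2a007fff00001000. In kernel mode the tag byte
// starts at 0xFF and is brought down to the desired tag by ANDing with
// (tag << 56) | 0x00ffffffffffffff.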
1194
1195// Remove tag from an address.
1196Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1197 assert(!UsePageAliases);
1198 Value *UntaggedPtrLong;
1199 if (CompileKernel) {
1200 // Kernel addresses have 0xFF in the most significant byte.
1201 UntaggedPtrLong =
1202 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1203 TagMaskByte << PointerTagShift));
1204 } else {
1205 // Userspace addresses have 0x00.
1206 UntaggedPtrLong = IRB.CreateAnd(
1207 PtrLong, ConstantInt::get(PtrLong->getType(),
1208 ~(TagMaskByte << PointerTagShift)));
1209 }
1210 return UntaggedPtrLong;
1211}
1212
1213Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1214 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1215 if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1216 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1217 // in Bionic's libc/private/bionic_tls.h.
1218 Function *ThreadPointerFunc =
1219 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1220 return IRB.CreateConstGEP1_32(Int8Ty, IRB.CreateCall(ThreadPointerFunc),
1221 0x30);
1222 }
1223 if (ThreadPtrGlobal)
1224 return ThreadPtrGlobal;
1225
1226 return nullptr;
1227}
1228
1229Value *HWAddressSanitizer::getPC(IRBuilder<> &IRB) {
1230 if (TargetTriple.getArch() == Triple::aarch64)
1231 return readRegister(IRB, "pc");
1232 return IRB.CreatePtrToInt(IRB.GetInsertBlock()->getParent(), IntptrTy);
1233}
1234
1235Value *HWAddressSanitizer::getSP(IRBuilder<> &IRB) {
1236 if (!CachedSP) {
1237 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
1238 // first).
1239 Function *F = IRB.GetInsertBlock()->getParent();
1240 Module *M = F->getParent();
1241 auto *GetStackPointerFn = Intrinsic::getDeclaration(
1242 M, Intrinsic::frameaddress,
1243 IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
1244 CachedSP = IRB.CreatePtrToInt(
1245 IRB.CreateCall(GetStackPointerFn, {Constant::getNullValue(Int32Ty)}),
1246 IntptrTy);
1247 }
1248 return CachedSP;
1249}
1250
1251Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1252 // Prepare ring buffer data.
1253 Value *PC = getPC(IRB);
1254 Value *SP = getSP(IRB);
1255
1256 // Mix SP and PC.
1257 // Assumptions:
1258 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1259 // SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
1260 // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
1261 // 0xSSSSPPPPPPPPPPPP
1262 SP = IRB.CreateShl(SP, 44);
1263 return IRB.CreateOr(PC, SP);
1264}
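// Illustrative example (not from the source): if PC = 0x00000aaabbbbcccc and
// the low bits of SP are 0x0d4c0, then SP << 44 moves those bits to the top
// of the word and the OR yields 0x0d4c0aaabbbbcccc, which the runtime splits
// back into an approximate SP and the full PC.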
1265
1266void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1267 if (!Mapping.InTls)
1268 ShadowBase = getShadowNonTls(IRB);
1269 else if (!WithFrameRecord && TargetTriple.isAndroid())
1270 ShadowBase = getDynamicShadowIfunc(IRB);
1271
1272 if (!WithFrameRecord && ShadowBase)
1273 return;
1274
1275 Value *SlotPtr = nullptr;
1276 Value *ThreadLong = nullptr;
1277 Value *ThreadLongMaybeUntagged = nullptr;
1278
1279 auto getThreadLongMaybeUntagged = [&]() {
1280 if (!SlotPtr)
1281 SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
1282 if (!ThreadLong)
1283 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1284 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1285 // TBI.
1286 return TargetTriple.isAArch64() ? ThreadLong
1287 : untagPointer(IRB, ThreadLong);
1288 };
1289
1290 if (WithFrameRecord) {
1291 switch (ClRecordStackHistory) {
1292 case libcall: {
1293 // Emit a runtime call into hwasan rather than emitting instructions for
1294 // recording stack history.
1295 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1296 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1297 break;
1298 }
1299 case instr: {
1300 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1301
1302 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1303
1304 // Store data to ring buffer.
1305 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1306 Value *RecordPtr =
1307 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1308 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1309
1310 // Update the ring buffer. Top byte of ThreadLong defines the size of the
1311 // buffer in pages, it must be a power of two, and the start of the buffer
1312 // must be aligned by twice that much. Therefore wrap around of the ring
1313 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1314 // The use of AShr instead of LShr is due to
1315 // https://bugs.llvm.org/show_bug.cgi?id=39030
1316 // Runtime library makes sure not to use the highest bit.
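      // Illustrative example (not from the source): if the top byte of
      // ThreadLong is 2 (an 8 KiB, two-page ring buffer), WrapMask is ~0x2000,
      // so advancing the pointer by 8 past the end of the buffer clears that
      // bit and wraps back to the start; since the start is aligned to twice
      // the buffer size, no other bits are disturbed.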
1317 Value *WrapMask = IRB.CreateXor(
1318 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1319 ConstantInt::get(IntptrTy, (uint64_t)-1));
1320 Value *ThreadLongNew = IRB.CreateAnd(
1321 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1322 IRB.CreateStore(ThreadLongNew, SlotPtr);
1323 break;
1324 }
1325 case none: {
1327 "A stack history recording mode should've been selected.");
1328 }
1329 }
1330 }
1331
1332 if (!ShadowBase) {
1333 if (!ThreadLongMaybeUntagged)
1334 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1335
1336 // Get shadow base address by aligning RecordPtr up.
1337 // Note: this is not correct if the pointer is already aligned.
1338 // Runtime library will make sure this never happens.
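    // Illustrative example (not from the source): with kShadowBaseAlignment
    // of 32, a record pointer of 0x700000001234 ORed with 0xffffffff gives
    // 0x7000ffffffff, and adding 1 rounds it up to the aligned shadow base
    // 0x700100000000.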
1339 ShadowBase = IRB.CreateAdd(
1340 IRB.CreateOr(
1341 ThreadLongMaybeUntagged,
1342 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1343 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1344 ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1345 }
1346}
1347
1348Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1349 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1350 Function *ReadRegister =
1351 Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1352 MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1353 Value *Args[] = {MetadataAsValue::get(*C, MD)};
1354 return IRB.CreateCall(ReadRegister, Args);
1355}
1356
1357bool HWAddressSanitizer::instrumentLandingPads(
1358 SmallVectorImpl<Instruction *> &LandingPadVec) {
1359 for (auto *LP : LandingPadVec) {
1360 IRBuilder<> IRB(LP->getNextNonDebugInstruction());
1361 IRB.CreateCall(
1362 HwasanHandleVfork,
1363 {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1364 : "sp")});
1365 }
1366 return true;
1367}
1368
static bool isLifetimeIntrinsic(Value *V) {
  auto *II = dyn_cast<IntrinsicInst>(V);
1371 return II && II->isLifetimeStartOrEnd();
1372}
1373
static DbgAssignIntrinsic *DynCastToDbgAssign(DbgVariableIntrinsic *DVI) {
  return dyn_cast<DbgAssignIntrinsic>(DVI);
1376}
1377
static DPValue *DynCastToDbgAssign(DPValue *DPV) {
  return DPV->isDbgAssign() ? DPV : nullptr;
1380}
1381
1382bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1383 Value *StackTag, Value *UARTag,
1384 const DominatorTree &DT,
1385 const PostDominatorTree &PDT,
1386 const LoopInfo &LI) {
1387 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1388 // alloca addresses using that. Unfortunately, offsets are not known yet
1389 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1390 // temp, shift-OR it into each alloca address and xor with the retag mask.
1391 // This generates one extra instruction per alloca use.
1392 unsigned int I = 0;
1393
1394 for (auto &KV : SInfo.AllocasToInstrument) {
1395 auto N = I++;
1396 auto *AI = KV.first;
1397 memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNode());

1400 // Replace uses of the alloca with tagged address.
1401 Value *Tag = getAllocaTag(IRB, StackTag, N);
1402 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1403 Value *AINoTagLong = untagPointer(IRB, AILong);
1404 Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1405 std::string Name =
1406 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1407 Replacement->setName(Name + ".hwasan");
1408
1409 size_t Size = memtag::getAllocaSizeInBytes(*AI);
1410 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1411
1412 Value *AICast = IRB.CreatePointerCast(AI, PtrTy);
1413
1414 auto HandleLifetime = [&](IntrinsicInst *II) {
1415 // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1416 // set of assumptions we need to make about the lifetime. Without this we
1417 // would need to ensure that we can track the lifetime pointer to a
1418 // constant offset from the alloca, and would still need to change the
1419 // size to include the extra alignment we use for the untagging to make
1420 // the size consistent.
1421 //
1422 // The check for standard lifetime below makes sure that we have exactly
1423 // one set of start / end in any execution (i.e. the ends are not
1424 // reachable from each other), so this will not cause any problems.
1425 II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1426 II->setArgOperand(1, AICast);
1427 };
1428 llvm::for_each(Info.LifetimeStart, HandleLifetime);
1429 llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1430
1431 AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
1432 auto *User = U.getUser();
1433 return User != AILong && User != AICast && !isLifetimeIntrinsic(User);
1434 });
1435
1436 // Helper utility for adding DW_OP_LLVM_tag_offset to debug-info records,
1437 // abstracted over whether they're intrinsic-stored or DPValue stored.
1438 auto AnnotateDbgRecord = [&](auto *DPtr) {
1439 // Prepend "tag_offset, N" to the dwarf expression.
1440 // Tag offset logically applies to the alloca pointer, and it makes sense
1441 // to put it at the beginning of the expression.
      SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
                                         retagMask(N)};
1444 for (size_t LocNo = 0; LocNo < DPtr->getNumVariableLocationOps(); ++LocNo)
1445 if (DPtr->getVariableLocationOp(LocNo) == AI)
1446 DPtr->setExpression(DIExpression::appendOpsToArg(
1447 DPtr->getExpression(), NewOps, LocNo));
1448 if (auto *DAI = DynCastToDbgAssign(DPtr)) {
1449 if (DAI->getAddress() == AI)
1450 DAI->setAddressExpression(DIExpression::prependOpcodes(
1451 DAI->getAddressExpression(), NewOps));
1452 }
1453 };
1454
1455 llvm::for_each(Info.DbgVariableIntrinsics, AnnotateDbgRecord);
1456 llvm::for_each(Info.DbgVariableRecords, AnnotateDbgRecord);
1457
1458 auto TagEnd = [&](Instruction *Node) {
1459 IRB.SetInsertPoint(Node);
1460 // When untagging, use the `AlignedSize` because we need to set the tags
1461 // for the entire alloca to original. If we used `Size` here, we would
1462 // keep the last granule tagged, and store zero in the last byte of the
1463 // last granule, due to how short granules are implemented.
1464 tagAlloca(IRB, AI, UARTag, AlignedSize);
1465 };
1466 // Calls to functions that may return twice (e.g. setjmp) confuse the
1467 // postdominator analysis, and will leave us to keep memory tagged after
1468 // function return. Work around this by always untagging at every return
1469 // statement if return_twice functions are called.
1470 bool StandardLifetime =
1471 !SInfo.CallsReturnTwice &&
1472 SInfo.UnrecognizedLifetimes.empty() &&
1473 memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1474 &LI, ClMaxLifetimes);
1475 if (DetectUseAfterScope && StandardLifetime) {
1476 IntrinsicInst *Start = Info.LifetimeStart[0];
1477 IRB.SetInsertPoint(Start->getNextNode());
1478 tagAlloca(IRB, AI, Tag, Size);
1479 if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1480 SInfo.RetVec, TagEnd)) {
1481 for (auto *End : Info.LifetimeEnd)
1482 End->eraseFromParent();
1483 }
1484 } else {
1485 tagAlloca(IRB, AI, Tag, Size);
1486 for (auto *RI : SInfo.RetVec)
1487 TagEnd(RI);
1488 // We inserted tagging outside of the lifetimes, so we have to remove
1489 // them.
1490 for (auto &II : Info.LifetimeStart)
1491 II->eraseFromParent();
1492 for (auto &II : Info.LifetimeEnd)
1493 II->eraseFromParent();
1494 }
1495 memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1496 }
1497 for (auto &I : SInfo.UnrecognizedLifetimes)
1498 I->eraseFromParent();
1499 return true;
1500}
1501
1502void HWAddressSanitizer::sanitizeFunction(Function &F,
                                           FunctionAnalysisManager &FAM) {
  if (&F == HwasanCtorFunction)
1505 return;
1506
1507 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1508 return;
1509
1510 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1511
1512 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1513 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1514 SmallVector<Instruction *, 8> LandingPadVec;
  const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);

1517 memtag::StackInfoBuilder SIB(SSI);
1518 for (auto &Inst : instructions(F)) {
1519 if (InstrumentStack) {
1520 SIB.visit(Inst);
1521 }
1522
1523 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1524 LandingPadVec.push_back(&Inst);
1525
1526 getInterestingMemoryOperands(&Inst, TLI, OperandsToInstrument);
1527
1528 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1529 if (!ignoreMemIntrinsic(MI))
1530 IntrinToInstrument.push_back(MI);
1531 }
1532
1533 memtag::StackInfo &SInfo = SIB.get();
1534
1535 initializeCallbacks(*F.getParent());
1536
1537 if (!LandingPadVec.empty())
1538 instrumentLandingPads(LandingPadVec);
1539
1540 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1541 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1542 // __hwasan_personality_thunk is a no-op for functions without an
1543 // instrumented stack, so we can drop it.
1544 F.setPersonalityFn(nullptr);
1545 }
1546
1547 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1548 IntrinToInstrument.empty())
1549 return;
1550
1551 assert(!ShadowBase);
1552
1553 BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1554 IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1555 emitPrologue(EntryIRB,
1556 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1557 Mapping.WithFrameRecord &&
1558 !SInfo.AllocasToInstrument.empty());
1559
1560 if (!SInfo.AllocasToInstrument.empty()) {
    const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1564 Value *StackTag = getStackBaseTag(EntryIRB);
1565 Value *UARTag = getUARTag(EntryIRB);
1566 instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1567 }
1568
1569 // If we split the entry block, move any allocas that were originally in the
1570 // entry block back into the entry block so that they aren't treated as
1571 // dynamic allocas.
1572 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1573 InsertPt = F.getEntryBlock().begin();
1574 for (Instruction &I :
1575 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1576 if (auto *AI = dyn_cast<AllocaInst>(&I))
1577 if (isa<ConstantInt>(AI->getArraySize()))
1578 I.moveBefore(F.getEntryBlock(), InsertPt);
1579 }
1580 }
1581
  DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
  LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  for (auto &Operand : OperandsToInstrument)
1587 instrumentMemAccess(Operand, DTU, LI);
1588 DTU.flush();
1589
1590 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1591 for (auto *Inst : IntrinToInstrument)
1592 instrumentMemIntrinsic(Inst);
1593 }
1594
1595 ShadowBase = nullptr;
1596 StackBaseTag = nullptr;
1597 CachedSP = nullptr;
1598}
1599
1600void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1601 assert(!UsePageAliases);
1602 Constant *Initializer = GV->getInitializer();
1603 uint64_t SizeInBytes =
1604 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1605 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1606 if (SizeInBytes != NewSize) {
1607 // Pad the initializer out to the next multiple of 16 bytes and add the
1608 // required short granule tag.
1609 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1610 Init.back() = Tag;
1611 Constant *Padding = ConstantDataArray::get(*C, Init);
1612 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1613 }
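// Illustrative example: a 13-byte global is padded to the 16-byte granule
// size, and the final padding byte carries the tag, mirroring the
// short-granule convention used for stack slots.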
1614
1615 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1616 GlobalValue::ExternalLinkage, Initializer,
1617 GV->getName() + ".hwasan");
1618 NewGV->copyAttributesFrom(GV);
1619 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1620 NewGV->copyMetadata(GV, 0);
1621 NewGV->setAlignment(
1622 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1623
1624 // It is invalid to ICF two globals that have different tags. In the case
1625 // where the size of the global is a multiple of the tag granularity the
1626 // contents of the globals may be the same but the tags (i.e. symbol values)
1627 // may be different, and the symbols are not considered during ICF. In the
1628 // case where the size is not a multiple of the granularity, the short granule
1629 // tags would discriminate two globals with different tags, but there would
1630 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1631 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1632 // granule tag in the last byte.
1633 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1634
1635 // Descriptor format (assuming little-endian):
1636 // bytes 0-3: relative address of global
1637 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1638 // it isn't, we create multiple descriptors)
1639 // byte 7: tag
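// Illustrative example: a 96-byte global with tag 0x2a gets a single
// descriptor whose second word is 0x2a000060 (size in the low 24 bits, tag in
// the top byte); larger globals get one descriptor per 0xfffff0-byte chunk.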
1640 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1641 const uint64_t MaxDescriptorSize = 0xfffff0;
1642 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1643 DescriptorPos += MaxDescriptorSize) {
1644 auto *Descriptor =
1645 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1646 nullptr, GV->getName() + ".hwasan.descriptor");
1647 auto *GVRelPtr = ConstantExpr::getTrunc(
1648 ConstantExpr::getAdd(
1649 ConstantExpr::getSub(
1650 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1651 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1652 ConstantInt::get(Int64Ty, DescriptorPos)),
1653 Int32Ty);
1654 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1655 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1656 Descriptor->setComdat(NewGV->getComdat());
1657 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1658 Descriptor->setSection("hwasan_globals");
1659 Descriptor->setMetadata(LLVMContext::MD_associated,
1660 MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1661 appendToCompilerUsed(M, Descriptor);
1662 }
1663
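// References to the original symbol are redirected to an alias whose address
// has the tag in the top byte; illustratively, with a pointer tag shift of 56
// and tag 0x2a, the alias value is NewGV's address plus (0x2a << 56).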
1664 Constant *Aliasee = ConstantExpr::getIntToPtr(
1665 ConstantExpr::getAdd(
1666 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1667 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1668 GV->getType());
1669 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1670 GV->getLinkage(), "", Aliasee, &M);
1671 Alias->setVisibility(GV->getVisibility());
1672 Alias->takeName(GV);
1673 GV->replaceAllUsesWith(Alias);
1674 GV->eraseFromParent();
1675}
1676
1677void HWAddressSanitizer::instrumentGlobals() {
1678 std::vector<GlobalVariable *> Globals;
1679 for (GlobalVariable &GV : M.globals()) {
1680 if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
1681 continue;
1682
1683 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1684 GV.isThreadLocal())
1685 continue;
1686
1687 // Common symbols can't have aliases point to them, so they can't be tagged.
1688 if (GV.hasCommonLinkage())
1689 continue;
1690
1691 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1692 // which would be broken both by adding tags and potentially by the extra
1693 // padding/alignment that we insert.
1694 if (GV.hasSection())
1695 continue;
1696
1697 Globals.push_back(&GV);
1698 }
1699
1700 MD5 Hasher;
1701 Hasher.update(M.getSourceFileName());
1702 MD5::MD5Result Hash;
1703 Hasher.final(Hash);
1704 uint8_t Tag = Hash[0];
1705
1706 assert(TagMaskByte >= 16);
1707
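// Tags are handed out sequentially starting from the first hash byte, skipping
// values below 16 (reserved for short granules) and wrapping back to 16 past
// TagMaskByte. Illustratively, a starting hash byte of 0x07 gives the first
// global tag 16, the next 17, and so on.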
1708 for (GlobalVariable *GV : Globals) {
1709 // Don't allow globals to be tagged with something that looks like a
1710 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1711 // the fast path shadow-vs-address check succeeds.
1712 if (Tag < 16 || Tag > TagMaskByte)
1713 Tag = 16;
1714 instrumentGlobal(GV, Tag++);
1715 }
1716}
1717
1718void HWAddressSanitizer::instrumentPersonalityFunctions() {
1719 // We need to untag stack frames as we unwind past them. That is the job of
1720 // the personality function wrapper, which either wraps an existing
1721 // personality function or acts as a personality function on its own. Each
1722 // function that has a personality function or that can be unwound past has
1723 // its personality function changed to a thunk that calls the personality
1724 // function wrapper in the runtime.
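// Functions are grouped by their current personality routine (nullptr for
// functions that may unwind but declare none); one thunk is emitted per group
// and installed as the personality of every function in it.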
1725 MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1726 for (Function &F : M) {
1727 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1728 continue;
1729
1730 if (F.hasPersonalityFn()) {
1731 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1732 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1733 PersonalityFns[nullptr].push_back(&F);
1734 }
1735 }
1736
1737 if (PersonalityFns.empty())
1738 return;
1739
1740 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1741 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1742 PtrTy, PtrTy, PtrTy, PtrTy);
1743 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1744 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1745
1746 for (auto &P : PersonalityFns) {
1747 std::string ThunkName = kHwasanPersonalityThunkName;
1748 if (P.first)
1749 ThunkName += ("." + P.first->getName()).str();
1750 FunctionType *ThunkFnTy = FunctionType::get(
1751 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1752 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1753 cast<GlobalValue>(P.first)->hasLocalLinkage());
1754 auto *ThunkFn = Function::Create(ThunkFnTy,
1755 IsLocal ? GlobalValue::InternalLinkage
1756 : GlobalValue::LinkOnceODRLinkage,
1757 ThunkName, &M);
1758 if (!IsLocal) {
1759 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1760 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1761 }
1762
1763 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1764 IRBuilder<> IRB(BB);
1765 CallInst *WrapperCall = IRB.CreateCall(
1766 HwasanPersonalityWrapper,
1767 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1768 ThunkFn->getArg(3), ThunkFn->getArg(4),
1769 P.first ? P.first : Constant::getNullValue(PtrTy),
1770 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1771 WrapperCall->setTailCall();
1772 IRB.CreateRet(WrapperCall);
1773
1774 for (Function *F : P.second)
1775 F->setPersonalityFn(ThunkFn);
1776 }
1777}
1778
1779void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1780 bool InstrumentWithCalls) {
1781 Scale = kDefaultShadowScale;
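// Mapping selection below, in decreasing priority: Fuchsia (fixed zero-offset
// shadow), an explicit -hwasan-mapping-offset, kernel or callback-based
// instrumentation, ifunc-based shadow, TLS-based shadow, and finally a shadow
// base discovered dynamically at runtime.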
1782 if (TargetTriple.isOSFuchsia()) {
1783 // Fuchsia is always PIE, which means that the beginning of the address
1784 // space is always available.
1785 InGlobal = false;
1786 InTls = false;
1787 Offset = 0;
1788 WithFrameRecord = true;
1789 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1790 InGlobal = false;
1791 InTls = false;
1792 Offset = ClMappingOffset;
1793 WithFrameRecord = false;
1794 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1795 InGlobal = false;
1796 InTls = false;
1797 Offset = 0;
1798 WithFrameRecord = false;
1799 } else if (ClWithIfunc) {
1800 InGlobal = true;
1801 InTls = false;
1802 Offset = kDynamicShadowSentinel;
1803 WithFrameRecord = false;
1804 } else if (ClWithTls) {
1805 InGlobal = false;
1806 InTls = true;
1807 Offset = kDynamicShadowSentinel;
1808 WithFrameRecord = true;
1809 } else {
1810 InGlobal = false;
1811 InTls = false;
1812 Offset = kDynamicShadowSentinel;
1813 WithFrameRecord = false;
1814 }
1815}