Bug Summary

File: llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
Warning: line 1020, column 17
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name HWAddressSanitizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Instrumentation -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Instrumentation -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Instrumentation -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Instrumentation -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
1//===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address sanity checker
11/// based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/ADT/StringExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/ADT/Triple.h"
20#include "llvm/Analysis/CFG.h"
21#include "llvm/Analysis/PostDominators.h"
22#include "llvm/Analysis/StackSafetyAnalysis.h"
23#include "llvm/Analysis/ValueTracking.h"
24#include "llvm/BinaryFormat/ELF.h"
25#include "llvm/IR/Attributes.h"
26#include "llvm/IR/BasicBlock.h"
27#include "llvm/IR/Constant.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DataLayout.h"
30#include "llvm/IR/DebugInfoMetadata.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Dominators.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/IRBuilder.h"
35#include "llvm/IR/InlineAsm.h"
36#include "llvm/IR/InstVisitor.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/LLVMContext.h"
42#include "llvm/IR/MDBuilder.h"
43#include "llvm/IR/Module.h"
44#include "llvm/IR/Type.h"
45#include "llvm/IR/Value.h"
46#include "llvm/InitializePasses.h"
47#include "llvm/Pass.h"
48#include "llvm/PassRegistry.h"
49#include "llvm/Support/Casting.h"
50#include "llvm/Support/CommandLine.h"
51#include "llvm/Support/Debug.h"
52#include "llvm/Support/raw_ostream.h"
53#include "llvm/Transforms/Instrumentation.h"
54#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
55#include "llvm/Transforms/Utils/BasicBlockUtils.h"
56#include "llvm/Transforms/Utils/ModuleUtils.h"
57#include "llvm/Transforms/Utils/PromoteMemToReg.h"
58#include <sstream>
59
60using namespace llvm;
61
62#define DEBUG_TYPE "hwasan"
63
64const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
65const char kHwasanNoteName[] = "hwasan.note";
66const char kHwasanInitName[] = "__hwasan_init";
67const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
68
69const char kHwasanShadowMemoryDynamicAddress[] =
70 "__hwasan_shadow_memory_dynamic_address";
71
72// Access sizes are powers of two: 1, 2, 4, 8, 16.
73static const size_t kNumberOfAccessSizes = 5;
74
75static const size_t kDefaultShadowScale = 4;
76static const uint64_t kDynamicShadowSentinel =
77 std::numeric_limits<uint64_t>::max();
78
79static const unsigned kShadowBaseAlignment = 32;
80
81static cl::opt<std::string>
82 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
83 cl::desc("Prefix for memory access callbacks"),
84 cl::Hidden, cl::init("__hwasan_"));
85
86static cl::opt<bool> ClInstrumentWithCalls(
87 "hwasan-instrument-with-calls",
88 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
89 cl::init(false));
90
91static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
92 cl::desc("instrument read instructions"),
93 cl::Hidden, cl::init(true));
94
95static cl::opt<bool>
96 ClInstrumentWrites("hwasan-instrument-writes",
97 cl::desc("instrument write instructions"), cl::Hidden,
98 cl::init(true));
99
100static cl::opt<bool> ClInstrumentAtomics(
101 "hwasan-instrument-atomics",
102 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
103 cl::init(true));
104
105static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
106 cl::desc("instrument byval arguments"),
107 cl::Hidden, cl::init(true));
108
109static cl::opt<bool>
110 ClRecover("hwasan-recover",
111 cl::desc("Enable recovery mode (continue-after-error)."),
112 cl::Hidden, cl::init(false));
113
114static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
115 cl::desc("instrument stack (allocas)"),
116 cl::Hidden, cl::init(true));
117
118static cl::opt<bool>
119 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
120 cl::desc("Use Stack Safety analysis results"),
121 cl::Optional);
122
123static cl::opt<size_t> ClMaxLifetimes(
124 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
125 cl::ReallyHidden,
126 cl::desc("How many lifetime ends to handle for a single alloca."),
127 cl::Optional);
128
129static cl::opt<bool>
130 ClUseAfterScope("hwasan-use-after-scope",
131 cl::desc("detect use after scope within function"),
132 cl::Hidden, cl::init(false));
133
134static cl::opt<bool> ClUARRetagToZero(
135 "hwasan-uar-retag-to-zero",
136 cl::desc("Clear alloca tags before returning from the function to allow "
137 "non-instrumented and instrumented function calls mix. When set "
138 "to false, allocas are retagged before returning from the "
139 "function to detect use after return."),
140 cl::Hidden, cl::init(true));
141
142static cl::opt<bool> ClGenerateTagsWithCalls(
143 "hwasan-generate-tags-with-calls",
144 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
145 cl::init(false));
146
147static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
148 cl::Hidden, cl::init(false), cl::ZeroOrMore);
149
150static cl::opt<int> ClMatchAllTag(
151 "hwasan-match-all-tag",
152 cl::desc("don't report bad accesses via pointers with this tag"),
153 cl::Hidden, cl::init(-1));
154
155static cl::opt<bool>
156 ClEnableKhwasan("hwasan-kernel",
157 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
158 cl::Hidden, cl::init(false));
159
160// These flags allow changing the shadow mapping and control how shadow memory
161// is accessed. The shadow mapping looks like:
162// Shadow = (Mem >> scale) + offset
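// Worked example (numbers assumed for illustration only): with the default
// scale of 4 and a hypothetical offset of 0x100000000, an access at
// Mem = 0x7fff0010 maps to Shadow = (0x7fff0010 >> 4) + 0x100000000 =
// 0x107fff001, i.e. one shadow byte covers a 2^4 = 16-byte granule.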
163
164static cl::opt<uint64_t>
165 ClMappingOffset("hwasan-mapping-offset",
166 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
167 cl::Hidden, cl::init(0));
168
169static cl::opt<bool>
170 ClWithIfunc("hwasan-with-ifunc",
171 cl::desc("Access dynamic shadow through an ifunc global on "
172 "platforms that support this"),
173 cl::Hidden, cl::init(false));
174
175static cl::opt<bool> ClWithTls(
176 "hwasan-with-tls",
177 cl::desc("Access dynamic shadow through an thread-local pointer on "
178 "platforms that support this"),
179 cl::Hidden, cl::init(true));
180
181static cl::opt<bool>
182 ClRecordStackHistory("hwasan-record-stack-history",
183 cl::desc("Record stack frames with tagged allocations "
184 "in a thread-local ring buffer"),
185 cl::Hidden, cl::init(true));
186static cl::opt<bool>
187 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
188 cl::desc("instrument memory intrinsics"),
189 cl::Hidden, cl::init(true));
190
191static cl::opt<bool>
192 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
193 cl::desc("instrument landing pads"), cl::Hidden,
194 cl::init(false), cl::ZeroOrMore);
195
196static cl::opt<bool> ClUseShortGranules(
197 "hwasan-use-short-granules",
198 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
199 cl::init(false), cl::ZeroOrMore);
200
201static cl::opt<bool> ClInstrumentPersonalityFunctions(
202 "hwasan-instrument-personality-functions",
203 cl::desc("instrument personality functions"), cl::Hidden, cl::init(false),
204 cl::ZeroOrMore);
205
206static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
207 cl::desc("inline all checks"),
208 cl::Hidden, cl::init(false));
209
210// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
211static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
212 cl::desc("Use page aliasing in HWASan"),
213 cl::Hidden, cl::init(false));
214
215namespace {
216
217bool shouldUsePageAliases(const Triple &TargetTriple) {
218 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
219}
220
221bool shouldInstrumentStack(const Triple &TargetTriple) {
222 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
223}
224
225bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
226 return ClInstrumentWithCalls || TargetTriple.getArch() == Triple::x86_64;
227}
228
229bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
230 return ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
231 : !DisableOptimization;
232}
233
234bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
235 bool DisableOptimization) {
236 return shouldInstrumentStack(TargetTriple) &&
237 mightUseStackSafetyAnalysis(DisableOptimization);
238}
239
240bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
241 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
242}
243
244/// An instrumentation pass implementing detection of addressability bugs
245/// using tagged pointers.
246class HWAddressSanitizer {
247private:
248 struct AllocaInfo {
249 AllocaInst *AI;
250 SmallVector<IntrinsicInst *, 2> LifetimeStart;
251 SmallVector<IntrinsicInst *, 2> LifetimeEnd;
252 };
253
254public:
255 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
256 const StackSafetyGlobalInfo *SSI)
257 : M(M), SSI(SSI) {
258 this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
259 this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
260 ? ClEnableKhwasan
261 : CompileKernel;
262
263 initializeModule();
264 }
265
266 void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
267
268 DenseMap<AllocaInst *, AllocaInst *> padInterestingAllocas(
269 const MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument);
270 bool sanitizeFunction(Function &F,
271 llvm::function_ref<const DominatorTree &()> GetDT,
272 llvm::function_ref<const PostDominatorTree &()> GetPDT);
273 void initializeModule();
274 void createHwasanCtorComdat();
275
276 void initializeCallbacks(Module &M);
277
278 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
279
280 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
281 Value *getShadowNonTls(IRBuilder<> &IRB);
282
283 void untagPointerOperand(Instruction *I, Value *Addr);
284 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
285 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
286 unsigned AccessSizeIndex,
287 Instruction *InsertBefore);
288 void instrumentMemIntrinsic(MemIntrinsic *MI);
289 bool instrumentMemAccess(InterestingMemoryOperand &O);
290 bool ignoreAccess(Value *Ptr);
291 void getInterestingMemoryOperands(
292 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
293
294 bool isInterestingAlloca(const AllocaInst &AI);
295 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
296 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
297 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
298 static bool isStandardLifetime(const AllocaInfo &AllocaInfo,
299 const DominatorTree &DT);
300 bool instrumentStack(
301 MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument,
302 SmallVector<Instruction *, 4> &UnrecognizedLifetimes,
303 DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
304 SmallVectorImpl<Instruction *> &RetVec, Value *StackTag,
305 llvm::function_ref<const DominatorTree &()> GetDT,
306 llvm::function_ref<const PostDominatorTree &()> GetPDT);
307 Value *readRegister(IRBuilder<> &IRB, StringRef Name);
308 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
309 Value *getNextTagWithCall(IRBuilder<> &IRB);
310 Value *getStackBaseTag(IRBuilder<> &IRB);
311 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
312 unsigned AllocaNo);
313 Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
314
315 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
316 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
317 unsigned retagMask(unsigned AllocaNo);
318
319 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
320
321 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
322 void instrumentGlobals();
323
324 void instrumentPersonalityFunctions();
325
326private:
327 LLVMContext *C;
328 Module &M;
329 const StackSafetyGlobalInfo *SSI;
330 Triple TargetTriple;
331 FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
332 FunctionCallee HWAsanHandleVfork;
333
334 /// This struct defines the shadow mapping using the rule:
335 /// shadow = (mem >> Scale) + Offset.
336 /// If InGlobal is true, then
337 /// extern char __hwasan_shadow[];
338 /// shadow = (mem >> Scale) + &__hwasan_shadow
339 /// If InTls is true, then
340 /// extern char *__hwasan_tls;
341 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
342 ///
343 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
344 /// ring buffer for storing stack allocations on targets that support it.
345 struct ShadowMapping {
346 int Scale;
347 uint64_t Offset;
348 bool InGlobal;
349 bool InTls;
350 bool WithFrameRecord;
351
352 void init(Triple &TargetTriple, bool InstrumentWithCalls);
353 unsigned getObjectAlignment() const { return 1U << Scale; }
354 };
355
356 ShadowMapping Mapping;
357
358 Type *VoidTy = Type::getVoidTy(M.getContext());
359 Type *IntptrTy;
360 Type *Int8PtrTy;
361 Type *Int8Ty;
362 Type *Int32Ty;
363 Type *Int64Ty = Type::getInt64Ty(M.getContext());
364
365 bool CompileKernel;
366 bool Recover;
367 bool OutlinedChecks;
368 bool UseShortGranules;
369 bool InstrumentLandingPads;
370 bool InstrumentWithCalls;
371 bool InstrumentStack;
372 bool DetectUseAfterScope;
373 bool UsePageAliases;
374
375 bool HasMatchAllTag = false;
376 uint8_t MatchAllTag = 0;
377
378 unsigned PointerTagShift;
379 uint64_t TagMaskByte;
380
381 Function *HwasanCtorFunction;
382
383 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
384 FunctionCallee HwasanMemoryAccessCallbackSized[2];
385
386 FunctionCallee HwasanTagMemoryFunc;
387 FunctionCallee HwasanGenerateTagFunc;
388
389 Constant *ShadowGlobal;
390
391 Value *ShadowBase = nullptr;
392 Value *StackBaseTag = nullptr;
393 GlobalValue *ThreadPtrGlobal = nullptr;
394};
395
396class HWAddressSanitizerLegacyPass : public FunctionPass {
397public:
398 // Pass identification, replacement for typeid.
399 static char ID;
400
401 explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
402 bool Recover = false,
403 bool DisableOptimization = false)
404 : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
405 DisableOptimization(DisableOptimization) {
406 initializeHWAddressSanitizerLegacyPassPass(
407 *PassRegistry::getPassRegistry());
408 }
409
410 StringRef getPassName() const override { return "HWAddressSanitizer"; }
411
412 bool doInitialization(Module &M) override {
413 HWASan = std::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover,
414 /*SSI=*/nullptr);
415 return true;
416 }
417
418 bool runOnFunction(Function &F) override {
419 auto TargetTriple = Triple(F.getParent()->getTargetTriple());
420 if (shouldUseStackSafetyAnalysis(TargetTriple, DisableOptimization)) {
421 // We cannot call getAnalysis in doInitialization; that would cause a
422 // crash, as the required analyses are not initialized yet.
423 HWASan->setSSI(
424 &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult());
425 }
426 return HWASan->sanitizeFunction(
427 F,
428 [&]() -> const DominatorTree & {
429 return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
430 },
431 [&]() -> const PostDominatorTree & {
432 return getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
433 });
434 }
435
436 bool doFinalization(Module &M) override {
437 HWASan.reset();
438 return false;
439 }
440
441 void getAnalysisUsage(AnalysisUsage &AU) const override {
442 // This is an over-estimation: in case we are building for an
443 // architecture that doesn't allow stack tagging, we will still load the
444 // analysis.
445 // This is so we don't need to plumb TargetTriple all the way to here.
446 if (mightUseStackSafetyAnalysis(DisableOptimization))
447 AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
448 AU.addRequired<DominatorTreeWrapperPass>();
449 AU.addRequired<PostDominatorTreeWrapperPass>();
450 }
451
452private:
453 std::unique_ptr<HWAddressSanitizer> HWASan;
454 bool CompileKernel;
455 bool Recover;
456 bool DisableOptimization;
457};
458
459} // end anonymous namespace
460
461char HWAddressSanitizerLegacyPass::ID = 0;
462
463INITIALIZE_PASS_BEGIN(
464 HWAddressSanitizerLegacyPass, "hwasan",
465 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
466 false)
467INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
468INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
469INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
470INITIALIZE_PASS_END(
471 HWAddressSanitizerLegacyPass, "hwasan",
472 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
473 false)
474
475FunctionPass *
476llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel, bool Recover,
477 bool DisableOptimization) {
478 assert(!CompileKernel || Recover);
479 return new HWAddressSanitizerLegacyPass(CompileKernel, Recover,
480 DisableOptimization);
481}
482
483PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
484 ModuleAnalysisManager &MAM) {
485 const StackSafetyGlobalInfo *SSI = nullptr;
486 auto TargetTriple = llvm::Triple(M.getTargetTriple());
487 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
488 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
489
490 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
491 bool Modified = false;
492 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
493 for (Function &F : M) {
494 Modified |= HWASan.sanitizeFunction(
495 F,
496 [&]() -> const DominatorTree & {
497 return FAM.getResult<DominatorTreeAnalysis>(F);
498 },
499 [&]() -> const PostDominatorTree & {
500 return FAM.getResult<PostDominatorTreeAnalysis>(F);
501 });
502 }
503 if (Modified)
504 return PreservedAnalyses::none();
505 return PreservedAnalyses::all();
506}
507
508void HWAddressSanitizer::createHwasanCtorComdat() {
509 std::tie(HwasanCtorFunction, std::ignore) =
510 getOrCreateSanitizerCtorAndInitFunctions(
511 M, kHwasanModuleCtorName, kHwasanInitName,
512 /*InitArgTypes=*/{},
513 /*InitArgs=*/{},
514 // This callback is invoked when the functions are created the first
515 // time. Hook them into the global ctors list in that case:
516 [&](Function *Ctor, FunctionCallee) {
517 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
518 Ctor->setComdat(CtorComdat);
519 appendToGlobalCtors(M, Ctor, 0, Ctor);
520 });
521
522 // Create a note that contains pointers to the list of global
523 // descriptors. Adding a note to the output file will cause the linker to
524 // create a PT_NOTE program header pointing to the note that we can use to
525 // find the descriptor list starting from the program headers. A function
526 // provided by the runtime initializes the shadow memory for the globals by
527 // accessing the descriptor list via the note. The dynamic loader needs to
528 // call this function whenever a library is loaded.
529 //
530 // The reason why we use a note for this instead of a more conventional
531 // approach of having a global constructor pass a descriptor list pointer to
532 // the runtime is because of an order of initialization problem. With
533 // constructors we can encounter the following problematic scenario:
534 //
535 // 1) library A depends on library B and also interposes one of B's symbols
536 // 2) B's constructors are called before A's (as required for correctness)
537 // 3) during construction, B accesses one of its "own" globals (actually
538 // interposed by A) and triggers a HWASAN failure due to the initialization
539 // for A not having happened yet
540 //
541 // Even without interposition it is possible to run into similar situations in
542 // cases where two libraries mutually depend on each other.
543 //
544 // We only need one note per binary, so put everything for the note in a
545 // comdat. This needs to be a comdat with an .init_array section to prevent
546 // newer versions of lld from discarding the note.
547 //
548 // Create the note even if we aren't instrumenting globals. This ensures that
549 // binaries linked from object files with both instrumented and
550 // non-instrumented globals will end up with a note, even if a comdat from an
551 // object file with non-instrumented globals is selected. The note is harmless
552 // if the runtime doesn't support it, since it will just be ignored.
553 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
554
555 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
556 auto Start =
557 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
558 nullptr, "__start_hwasan_globals");
559 Start->setVisibility(GlobalValue::HiddenVisibility);
560 Start->setDSOLocal(true);
561 auto Stop =
562 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
563 nullptr, "__stop_hwasan_globals");
564 Stop->setVisibility(GlobalValue::HiddenVisibility);
565 Stop->setDSOLocal(true);
566
567 // The name is null-terminated, so it actually occupies 8 bytes, which is
568 // required in order to align the note properly.
569 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
570
571 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
572 Int32Ty, Int32Ty);
573 auto *Note =
574 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
575 GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
576 Note->setSection(".note.hwasan.globals");
577 Note->setComdat(NoteComdat);
578 Note->setAlignment(Align(4));
579 Note->setDSOLocal(true);
580
581 // The pointers in the note need to be relative so that the note ends up being
582 // placed in rodata, which is the standard location for notes.
583 auto CreateRelPtr = [&](Constant *Ptr) {
584 return ConstantExpr::getTrunc(
585 ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
586 ConstantExpr::getPtrToInt(Note, Int64Ty)),
587 Int32Ty);
588 };
589 Note->setInitializer(ConstantStruct::getAnon(
590 {ConstantInt::get(Int32Ty, 8), // n_namesz
591 ConstantInt::get(Int32Ty, 8), // n_descsz
592 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
593 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
594 appendToCompilerUsed(M, Note);
595
596 // Create a zero-length global in hwasan_globals so that the linker will
597 // always create start and stop symbols.
598 auto Dummy = new GlobalVariable(
599 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
600 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
601 Dummy->setSection("hwasan_globals");
602 Dummy->setComdat(NoteComdat);
603 Dummy->setMetadata(LLVMContext::MD_associated,
604 MDNode::get(*C, ValueAsMetadata::get(Note)));
605 appendToCompilerUsed(M, Dummy);
606}
607
608/// Module-level initialization.
609///
610/// Inserts a call to __hwasan_init into the module's constructor list.
611void HWAddressSanitizer::initializeModule() {
612 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
613 auto &DL = M.getDataLayout();
614
615 TargetTriple = Triple(M.getTargetTriple());
616
617 // x86_64 currently has two modes:
618 // - Intel LAM (default)
619 // - pointer aliasing (heap only)
620 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
621 UsePageAliases = shouldUsePageAliases(TargetTriple);
622 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
623 InstrumentStack = shouldInstrumentStack(TargetTriple);
624 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
625 PointerTagShift = IsX86_64 ? 57 : 56;
626 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
627
628 Mapping.init(TargetTriple, InstrumentWithCalls);
629
630 C = &(M.getContext());
631 IRBuilder<> IRB(*C);
632 IntptrTy = IRB.getIntPtrTy(DL);
633 Int8PtrTy = IRB.getInt8PtrTy();
634 Int8Ty = IRB.getInt8Ty();
635 Int32Ty = IRB.getInt32Ty();
636
637 HwasanCtorFunction = nullptr;
638
639 // Older versions of Android do not have the required runtime support for
640 // short granules, global or personality function instrumentation. On other
641 // platforms we currently require using the latest version of the runtime.
642 bool NewRuntime =
643 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
644
645 UseShortGranules =
646 ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
647 OutlinedChecks =
648 TargetTriple.isAArch64() && TargetTriple.isOSBinFormatELF() &&
649 (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
650
651 if (ClMatchAllTag.getNumOccurrences()) {
652 if (ClMatchAllTag != -1) {
653 HasMatchAllTag = true;
654 MatchAllTag = ClMatchAllTag & 0xFF;
655 }
656 } else if (CompileKernel) {
657 HasMatchAllTag = true;
658 MatchAllTag = 0xFF;
659 }
660
661 // If we don't have personality function support, fall back to landing pads.
662 InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
663 ? ClInstrumentLandingPads
664 : !NewRuntime;
665
666 if (!CompileKernel) {
667 createHwasanCtorComdat();
668 bool InstrumentGlobals =
669 ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
670
671 if (InstrumentGlobals && !UsePageAliases)
672 instrumentGlobals();
673
674 bool InstrumentPersonalityFunctions =
675 ClInstrumentPersonalityFunctions.getNumOccurrences()
676 ? ClInstrumentPersonalityFunctions
677 : NewRuntime;
678 if (InstrumentPersonalityFunctions)
679 instrumentPersonalityFunctions();
680 }
681
682 if (!TargetTriple.isAndroid()) {
683 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
684 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
685 GlobalValue::ExternalLinkage, nullptr,
686 "__hwasan_tls", nullptr,
687 GlobalVariable::InitialExecTLSModel);
688 appendToCompilerUsed(M, GV);
689 return GV;
690 });
691 ThreadPtrGlobal = cast<GlobalVariable>(C);
692 }
693}
694
695void HWAddressSanitizer::initializeCallbacks(Module &M) {
696 IRBuilder<> IRB(*C);
697 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
698 const std::string TypeStr = AccessIsWrite ? "store" : "load";
699 const std::string EndingStr = Recover ? "_noabort" : "";
700
701 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
702 ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
703 FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));
704
705 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
706 AccessSizeIndex++) {
707 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
708 M.getOrInsertFunction(
709 ClMemoryAccessCallbackPrefix + TypeStr +
710 itostr(1ULL << AccessSizeIndex) + EndingStr,
711 FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
712 }
713 }
714
715 HwasanTagMemoryFunc = M.getOrInsertFunction(
716 "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
717 HwasanGenerateTagFunc =
718 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
719
720 ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
721 ArrayType::get(IRB.getInt8Ty(), 0));
722
723 const std::string MemIntrinCallbackPrefix =
724 CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
725 HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
726 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
727 IRB.getInt8PtrTy(), IntptrTy);
728 HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
729 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
730 IRB.getInt8PtrTy(), IntptrTy);
731 HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
732 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
733 IRB.getInt32Ty(), IntptrTy);
734
735 HWAsanHandleVfork =
736 M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
737}
738
739Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
740 // An empty inline asm with input reg == output reg.
741 // An opaque no-op cast, basically.
742 // This prevents code bloat as a result of rematerializing trivial definitions
743 // such as constants or global addresses at every load and store.
744 InlineAsm *Asm =
745 InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
746 StringRef(""), StringRef("=r,0"),
747 /*hasSideEffects=*/false);
748 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
749}
750
751Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
752 return getOpaqueNoopCast(IRB, ShadowGlobal);
753}
754
755Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
756 if (Mapping.Offset != kDynamicShadowSentinel)
757 return getOpaqueNoopCast(
758 IRB, ConstantExpr::getIntToPtr(
759 ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
760
761 if (Mapping.InGlobal) {
762 return getDynamicShadowIfunc(IRB);
763 } else {
764 Value *GlobalDynamicAddress =
765 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
766 kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
767 return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
768 }
769}
770
771bool HWAddressSanitizer::ignoreAccess(Value *Ptr) {
772 // Do not instrument accesses from different address spaces; we cannot deal
773 // with them.
774 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
775 if (PtrTy->getPointerAddressSpace() != 0)
776 return true;
777
778 // Ignore swifterror addresses.
779 // swifterror memory addresses are mem2reg promoted by instruction
780 // selection. As such they cannot have regular uses like an instrumentation
781 // function and it makes no sense to track them as memory.
782 if (Ptr->isSwiftError())
783 return true;
784
785 return false;
786}
787
788void HWAddressSanitizer::getInterestingMemoryOperands(
789 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
790 // Skip memory accesses inserted by another instrumentation.
791 if (I->hasMetadata("nosanitize"))
792 return;
793
794 // Do not instrument the load fetching the dynamic shadow address.
795 if (ShadowBase == I)
796 return;
797
798 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
799 if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
800 return;
801 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
802 LI->getType(), LI->getAlign());
803 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
804 if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
805 return;
806 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
807 SI->getValueOperand()->getType(), SI->getAlign());
808 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
809 if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
810 return;
811 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
812 RMW->getValOperand()->getType(), None);
813 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
814 if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
815 return;
816 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
817 XCHG->getCompareOperand()->getType(), None);
818 } else if (auto CI = dyn_cast<CallInst>(I)) {
819 for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
820 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
821 ignoreAccess(CI->getArgOperand(ArgNo)))
822 continue;
823 Type *Ty = CI->getParamByValType(ArgNo);
824 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
825 }
826 }
827}
828
829static unsigned getPointerOperandIndex(Instruction *I) {
830 if (LoadInst *LI = dyn_cast<LoadInst>(I))
831 return LI->getPointerOperandIndex();
832 if (StoreInst *SI = dyn_cast<StoreInst>(I))
833 return SI->getPointerOperandIndex();
834 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
835 return RMW->getPointerOperandIndex();
836 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
837 return XCHG->getPointerOperandIndex();
838 report_fatal_error("Unexpected instruction");
839 return -1;
840}
841
842static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
843 size_t Res = countTrailingZeros(TypeSize / 8);
844 assert(Res < kNumberOfAccessSizes);
845 return Res;
846}
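// Example (illustrative): a 32-bit access has TypeSize = 32, so TypeSize / 8
// = 4 and countTrailingZeros(4) = 2, selecting the 4-byte (1 << 2) variant of
// HwasanMemoryAccessCallback.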
847
848void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
849 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64)
850 return;
851
852 IRBuilder<> IRB(I);
853 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
854 Value *UntaggedPtr =
855 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
856 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
857}
858
859Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
860 // Mem >> Scale
861 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
862 if (Mapping.Offset == 0)
863 return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
864 // (Mem >> Scale) + Offset
865 return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
866}
867
868void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
869 unsigned AccessSizeIndex,
870 Instruction *InsertBefore) {
871 assert(!UsePageAliases);
872 const int64_t AccessInfo =
873 (CompileKernel << HWASanAccessInfo::CompileKernelShift) +
874 (HasMatchAllTag << HWASanAccessInfo::HasMatchAllShift) +
875 (MatchAllTag << HWASanAccessInfo::MatchAllShift) +
876 (Recover << HWASanAccessInfo::RecoverShift) +
877 (IsWrite << HWASanAccessInfo::IsWriteShift) +
878 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
879 IRBuilder<> IRB(InsertBefore);
880
881 if (OutlinedChecks) {
882 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
883 Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
884 IRB.CreateCall(Intrinsic::getDeclaration(
885 M, UseShortGranules
886 ? Intrinsic::hwasan_check_memaccess_shortgranules
887 : Intrinsic::hwasan_check_memaccess),
888 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
889 return;
890 }
891
892 Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
893 Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, PointerTagShift),
894 IRB.getInt8Ty());
895 Value *AddrLong = untagPointer(IRB, PtrLong);
896 Value *Shadow = memToShadow(AddrLong, IRB);
897 Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
898 Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
899
900 if (HasMatchAllTag) {
901 Value *TagNotIgnored = IRB.CreateICmpNE(
902 PtrTag, ConstantInt::get(PtrTag->getType(), MatchAllTag));
903 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
904 }
905
906 Instruction *CheckTerm =
907 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
908 MDBuilder(*C).createBranchWeights(1, 100000));
909
910 IRB.SetInsertPoint(CheckTerm);
911 Value *OutOfShortGranuleTagRange =
912 IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
913 Instruction *CheckFailTerm =
914 SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
915 MDBuilder(*C).createBranchWeights(1, 100000));
916
917 IRB.SetInsertPoint(CheckTerm);
918 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
919 PtrLowBits = IRB.CreateAdd(
920 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
921 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
922 SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
923 MDBuilder(*C).createBranchWeights(1, 100000),
924 (DomTreeUpdater *)nullptr, nullptr,
925 CheckFailTerm->getParent());
926
927 IRB.SetInsertPoint(CheckTerm);
928 Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
929 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
930 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
931 Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
932 SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
933 MDBuilder(*C).createBranchWeights(1, 100000),
934 (DomTreeUpdater *)nullptr, nullptr,
935 CheckFailTerm->getParent());
936
937 IRB.SetInsertPoint(CheckFailTerm);
938 InlineAsm *Asm;
939 switch (TargetTriple.getArch()) {
940 case Triple::x86_64:
941 // The signal handler will find the data address in rdi.
942 Asm = InlineAsm::get(
943 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
944 "int3\nnopl " +
945 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
946 "(%rax)",
947 "{rdi}",
948 /*hasSideEffects=*/true);
949 break;
950 case Triple::aarch64:
951 case Triple::aarch64_be:
952 // The signal handler will find the data address in x0.
953 Asm = InlineAsm::get(
954 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
955 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
956 "{x0}",
957 /*hasSideEffects=*/true);
958 break;
959 default:
960 report_fatal_error("unsupported architecture");
961 }
962 IRB.CreateCall(Asm, PtrLong);
963 if (Recover)
964 cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
965}
966
967void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
968 IRBuilder<> IRB(MI);
969 if (isa<MemTransferInst>(MI)) {
970 IRB.CreateCall(
971 isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
972 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
973 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
974 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
975 } else if (isa<MemSetInst>(MI)) {
976 IRB.CreateCall(
977 HWAsanMemset,
978 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
979 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
980 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
981 }
982 MI->eraseFromParent();
983}
984
985bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
986 Value *Addr = O.getPtr();
987
988 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
989
990 if (O.MaybeMask)
991 return false; // FIXME
992
993 IRBuilder<> IRB(O.getInsn());
994 if (isPowerOf2_64(O.TypeSize) &&
995 (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
996 (!O.Alignment || *O.Alignment >= (1ULL << Mapping.Scale) ||
997 *O.Alignment >= O.TypeSize / 8)) {
998 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
999 if (InstrumentWithCalls) {
1000 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1001 IRB.CreatePointerCast(Addr, IntptrTy));
1002 } else {
1003 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
1004 }
1005 } else {
1006 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
1007 {IRB.CreatePointerCast(Addr, IntptrTy),
1008 ConstantInt::get(IntptrTy, O.TypeSize / 8)});
1009 }
1010 untagPointerOperand(O.getInsn(), Addr);
1011
1012 return true;
1013}
1014
1015static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
1016 uint64_t ArraySize = 1;
1017 if (AI.isArrayAllocation()) {
13. Assuming the condition is true
14. Taking true branch
1018 const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
15. Assuming the object is not a 'ConstantInt'
16. 'CI' initialized to a null pointer value
1019 assert(CI && "non-constant array size");
1020 ArraySize = CI->getZExtValue();
17. Called C++ object pointer is null
1021 }
1022 Type *Ty = AI.getAllocatedType();
1023 uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
1024 return SizeInBytes * ArraySize;
1025}
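A minimal null-safe sketch of the function above, matching the analyzer path
(an editor's illustration, not the upstream fix; the early-return policy is an
assumption):

static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  uint64_t ArraySize = 1;
  if (AI.isArrayAllocation()) {
    // dyn_cast returns nullptr when the array size is not a compile-time
    // constant; with -D NDEBUG the assert compiles away, so guard explicitly.
    const auto *CI = dyn_cast<ConstantInt>(AI.getArraySize());
    if (!CI)
      return 0; // Assumed policy: treat dynamic allocas as uninteresting.
    ArraySize = CI->getZExtValue();
  }
  Type *Ty = AI.getAllocatedType();
  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
  return SizeInBytes * ArraySize;
}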
1026
1027void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1028 size_t Size) {
1029 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1030 if (!UseShortGranules)
1031 Size = AlignedSize;
1032
1033 Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
1034 if (InstrumentWithCalls) {
1035 IRB.CreateCall(HwasanTagMemoryFunc,
1036 {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
1037 ConstantInt::get(IntptrTy, AlignedSize)});
1038 } else {
1039 size_t ShadowSize = Size >> Mapping.Scale;
1040 Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
1041 // If this memset is not inlined, it will be intercepted in the hwasan
1042 // runtime library. That's OK, because the interceptor skips the checks if
1043 // the address is in the shadow region.
1044 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1045 // llvm.memset right here into either a sequence of stores, or a call to
1046 // hwasan_tag_memory.
1047 if (ShadowSize)
1048 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1));
1049 if (Size != AlignedSize) {
1050 IRB.CreateStore(
1051 ConstantInt::get(Int8Ty, Size % Mapping.getObjectAlignment()),
1052 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1053 IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
1054 Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
1055 AlignedSize - 1));
1056 }
1057 }
1058}
1059
1060unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1061 if (TargetTriple.getArch() == Triple::x86_64)
1062 return AllocaNo & TagMaskByte;
1063
1064 // A list of 8-bit numbers that have at most one run of non-zero bits.
1065 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1066 // masks.
1067 // The list does not include the value 255, which is used for UAR.
1068 //
1069 // Because we are more likely to use earlier elements of this list than later
1070 // ones, it is sorted in increasing order of probability of collision with a
1071 // mask allocated (temporally) nearby. The program that generated this list
1072 // can be found at:
1073 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1074 static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240,
1075 48, 16, 120, 248, 56, 24, 8, 124, 252,
1076 60, 28, 12, 4, 126, 254, 62, 30, 14,
1077 6, 2, 127, 63, 31, 15, 7, 3, 1};
1078 return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
1079}
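// Illustration: 192 = 0b11000000 has a single run of set bits, so on AArch64
// x ^= (192 << 56) can be materialized as a single EOR with an immediate. 255
// (0b11111111) would also qualify, but it is reserved for use-after-return.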
1080
1081Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1082 if (TargetTriple.getArch() == Triple::x86_64) {
1083 Constant *TagMask = ConstantInt::get(IntptrTy, TagMaskByte);
1084 Value *NewTag = IRB.CreateAnd(OldTag, TagMask);
1085 return NewTag;
1086 }
1087 // aarch64 uses 8-bit tags, so no mask is needed.
1088 return OldTag;
1089}
1090
1091Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1092 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1093}
1094
1095Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1096 if (ClGenerateTagsWithCalls)
1097 return getNextTagWithCall(IRB);
1098 if (StackBaseTag)
1099 return StackBaseTag;
1100 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
1101 // first).
1102 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1103 auto GetStackPointerFn = Intrinsic::getDeclaration(
1104 M, Intrinsic::frameaddress,
1105 IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1106 Value *StackPointer = IRB.CreateCall(
1107 GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
1108
1109 // Extract some entropy from the stack pointer for the tags.
1110 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1111 // between functions).
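// For instance (stack address assumed): with SP = 0x7ffc12345670, SP >> 20
// ends in byte 0x23 while SP itself ends in 0x70, so the xor below leaves
// 0x53 in the low byte, which later becomes the frame's base tag material.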
1112 Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
1113 Value *StackTag =
1114 applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1115 IRB.CreateLShr(StackPointerLong, 20)));
1116 StackTag->setName("hwasan.stack.base.tag");
1117 return StackTag;
1118}
1119
1120Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1121 AllocaInst *AI, unsigned AllocaNo) {
1122 if (ClGenerateTagsWithCalls)
1123 return getNextTagWithCall(IRB);
1124 return IRB.CreateXor(StackTag,
1125 ConstantInt::get(IntptrTy, retagMask(AllocaNo)));
1126}
1127
1128Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
1129 if (ClUARRetagToZero)
1130 return ConstantInt::get(IntptrTy, 0);
1131 if (ClGenerateTagsWithCalls)
1132 return getNextTagWithCall(IRB);
1133 return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, TagMaskByte));
1134}
1135
1136// Add a tag to an address.
1137Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1138 Value *PtrLong, Value *Tag) {
1139 assert(!UsePageAliases);
1140 Value *TaggedPtrLong;
1141 if (CompileKernel) {
1142 // Kernel addresses have 0xFF in the most significant byte.
1143 Value *ShiftedTag =
1144 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1145 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1146 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1147 } else {
1148 // Userspace can simply do OR (tag << PointerTagShift);
1149 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1150 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1151 }
1152 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1153}
1154
1155// Remove tag from an address.
1156Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1157 assert(!UsePageAliases);
1158 Value *UntaggedPtrLong;
1159 if (CompileKernel) {
1160 // Kernel addresses have 0xFF in the most significant byte.
1161 UntaggedPtrLong =
1162 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1163 0xFFULL << PointerTagShift));
1164 } else {
1165 // Userspace addresses have 0x00.
1166 UntaggedPtrLong =
1167 IRB.CreateAnd(PtrLong, ConstantInt::get(PtrLong->getType(),
1168 ~(0xFFULL << PointerTagShift)));
1169 }
1170 return UntaggedPtrLong;
1171}
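// Illustration with PointerTagShift == 56 (the non-x86 case, pointer value
// assumed): a userspace pointer 0xAB007FFF12345678 carries tag 0xAB in its
// top byte; AND-ing with ~(0xFF << 56) yields 0x00007FFF12345678. A kernel
// pointer is OR-ed with 0xFF << 56 instead, restoring the all-ones top byte.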
1172
1173Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1174 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1175 if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1176 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1177 // in Bionic's libc/private/bionic_tls.h.
1178 Function *ThreadPointerFunc =
1179 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1180 Value *SlotPtr = IRB.CreatePointerCast(
1181 IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
1182 IRB.CreateCall(ThreadPointerFunc), 0x30),
1183 Ty->getPointerTo(0));
1184 return SlotPtr;
1185 }
1186 if (ThreadPtrGlobal)
1187 return ThreadPtrGlobal;
1188
1189 return nullptr;
1190}
1191
1192void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1193 if (!Mapping.InTls)
1194 ShadowBase = getShadowNonTls(IRB);
1195 else if (!WithFrameRecord && TargetTriple.isAndroid())
1196 ShadowBase = getDynamicShadowIfunc(IRB);
1197
1198 if (!WithFrameRecord && ShadowBase)
1199 return;
1200
1201 Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
1202 assert(SlotPtr);
1203
1204 Value *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1205 // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
1206 Value *ThreadLongMaybeUntagged =
1207 TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
1208
1209 if (WithFrameRecord) {
1210 Function *F = IRB.GetInsertBlock()->getParent();
1211 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1212
1213 // Prepare ring buffer data.
1214 Value *PC;
1215 if (TargetTriple.getArch() == Triple::aarch64)
1216 PC = readRegister(IRB, "pc");
1217 else
1218 PC = IRB.CreatePtrToInt(F, IntptrTy);
1219 Module *M = F->getParent();
1220 auto GetStackPointerFn = Intrinsic::getDeclaration(
1221 M, Intrinsic::frameaddress,
1222 IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1223 Value *SP = IRB.CreatePtrToInt(
1224 IRB.CreateCall(GetStackPointerFn,
1225 {Constant::getNullValue(IRB.getInt32Ty())}),
1226 IntptrTy);
1227 // Mix SP and PC.
1228 // Assumptions:
1229 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1230 // SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
1231 // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
1232 // 0xSSSSPPPPPPPPPPPP
1233 SP = IRB.CreateShl(SP, 44);
1234
1235 // Store data to ring buffer.
1236 Value *RecordPtr =
1237 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
1238 IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);
1239
1240 // Update the ring buffer. The top byte of ThreadLong defines the size of
1241 // the buffer in pages; it must be a power of two, and the start of the
1242 // buffer must be aligned by twice that much. Therefore wrap-around of the
1243 // ring buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1244 // The use of AShr instead of LShr is due to
1245 // https://bugs.llvm.org/show_bug.cgi?id=39030
1246 // The runtime library makes sure not to use the highest bit.
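// Worked example (buffer size assumed): for an 8 KiB (two-page) ring buffer,
// ThreadLong >> 56 == 2 and WrapMask == ~0x2000. The buffer start is 16
// KiB-aligned, so once ThreadLong + 8 crosses start + 0x2000, clearing bit 13
// wraps the cursor back to the start of the buffer.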
1247 Value *WrapMask = IRB.CreateXor(
1248 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1249 ConstantInt::get(IntptrTy, (uint64_t)-1));
1250 Value *ThreadLongNew = IRB.CreateAnd(
1251 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1252 IRB.CreateStore(ThreadLongNew, SlotPtr);
1253 }
1254
1255 if (!ShadowBase) {
1256 // Get shadow base address by aligning RecordPtr up.
1257 // Note: this is not correct if the pointer is already aligned.
1258 // The runtime library makes sure this never happens.
1259 ShadowBase = IRB.CreateAdd(
1260 IRB.CreateOr(
1261 ThreadLongMaybeUntagged,
1262 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1263 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1264 ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
1265 }
1266}
1267
1268Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1269 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1270 Function *ReadRegister =
1271 Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1272 MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1273 Value *Args[] = {MetadataAsValue::get(*C, MD)};
1274 return IRB.CreateCall(ReadRegister, Args);
1275}
1276
1277bool HWAddressSanitizer::instrumentLandingPads(
1278 SmallVectorImpl<Instruction *> &LandingPadVec) {
1279 for (auto *LP : LandingPadVec) {
1280 IRBuilder<> IRB(LP->getNextNode());
1281 IRB.CreateCall(
1282 HWAsanHandleVfork,
1283 {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1284 : "sp")});
1285 }
1286 return true;
1287}
1288
1289static bool
1290maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
1291 const DominatorTree &DT) {
1292 // If we have too many lifetime ends, give up, as the algorithm below is N^2.
1293 if (Insts.size() > ClMaxLifetimes)
1294 return true;
1295 for (size_t I = 0; I < Insts.size(); ++I) {
1296 for (size_t J = 0; J < Insts.size(); ++J) {
1297 if (I == J)
1298 continue;
1299 if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, &DT))
1300 return true;
1301 }
1302 }
1303 return false;
1304}
1305
1306// static
1307bool HWAddressSanitizer::isStandardLifetime(const AllocaInfo &AllocaInfo,
1308 const DominatorTree &DT) {
1309 // True for an alloca that has exactly one lifetime start and end in every possible execution.
1310 // If it has multiple ends, they have to be unreachable from each other, so
1311 // at most one of them is actually used for each execution of the function.
1312 return AllocaInfo.LifetimeStart.size() == 1 &&
1313 (AllocaInfo.LifetimeEnd.size() == 1 ||
1314 (AllocaInfo.LifetimeEnd.size() > 0 &&
1315 !maybeReachableFromEachOther(AllocaInfo.LifetimeEnd, DT)));
1316}
1317
1318bool HWAddressSanitizer::instrumentStack(
1319 MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument,
1320 SmallVector<Instruction *, 4> &UnrecognizedLifetimes,
1321 DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
1322 SmallVectorImpl<Instruction *> &RetVec, Value *StackTag,
1323 llvm::function_ref<const DominatorTree &()> GetDT,
1324 llvm::function_ref<const PostDominatorTree &()> GetPDT) {
1325 // Ideally, we want to calculate the tagged stack base pointer and rewrite all
1326 // alloca addresses using that. Unfortunately, offsets are not known yet
1327 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1328 // temp, shift-OR it into each alloca address and xor with the retag mask.
1329 // This generates one extra instruction per alloca use.
1330 unsigned int I = 0;
1331
1332 for (auto &KV : AllocasToInstrument) {
1333 auto N = I++;
1334 auto *AI = KV.first;
1335 AllocaInfo &Info = KV.second;
1336 IRBuilder<> IRB(AI->getNextNode());
1337
1338 // Replace uses of the alloca with tagged address.
1339 Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
1340 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1341 Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
1342 std::string Name =
1343 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1344 Replacement->setName(Name + ".hwasan");
1345
1346 AI->replaceUsesWithIf(Replacement,
1347 [AILong](Use &U) { return U.getUser() != AILong; });
1348
1349 for (auto *DDI : AllocaDbgMap.lookup(AI)) {
1350 // Prepend "tag_offset, N" to the dwarf expression.
1351 // Tag offset logically applies to the alloca pointer, and it makes sense
1352 // to put it at the beginning of the expression.
1353 SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
1354 retagMask(N)};
1355 for (size_t LocNo = 0; LocNo < DDI->getNumVariableLocationOps(); ++LocNo)
1356 if (DDI->getVariableLocationOp(LocNo) == AI)
1357 DDI->setExpression(DIExpression::appendOpsToArg(DDI->getExpression(),
1358 NewOps, LocNo));
1359 }
1360
1361 size_t Size = getAllocaSizeInBytes(*AI);
1362 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1363 bool StandardLifetime =
1364 UnrecognizedLifetimes.empty() && isStandardLifetime(Info, GetDT());
1365 if (DetectUseAfterScope && StandardLifetime) {
1366 IntrinsicInst *Start = Info.LifetimeStart[0];
1367 IRB.SetInsertPoint(Start->getNextNode());
1368 auto TagEnd = [&](Instruction *Node) {
1369 IRB.SetInsertPoint(Node);
1370 Value *UARTag = getUARTag(IRB, StackTag);
1371 tagAlloca(IRB, AI, UARTag, AlignedSize);
1372 };
1373 tagAlloca(IRB, AI, Tag, Size);
1374 if (!forAllReachableExits(GetDT(), GetPDT(), Start, Info.LifetimeEnd,
1375 RetVec, TagEnd)) {
1376 for (auto *End : Info.LifetimeEnd)
1377 End->eraseFromParent();
1378 }
1379 } else {
1380 tagAlloca(IRB, AI, Tag, Size);
1381 for (auto *RI : RetVec) {
1382 IRB.SetInsertPoint(RI);
1383 Value *UARTag = getUARTag(IRB, StackTag);
1384 tagAlloca(IRB, AI, UARTag, AlignedSize);
1385 }
1386 if (!StandardLifetime) {
1387 for (auto &II : Info.LifetimeStart)
1388 II->eraseFromParent();
1389 for (auto &II : Info.LifetimeEnd)
1390 II->eraseFromParent();
1391 }
1392 }
1393 }
1394 for (auto &I : UnrecognizedLifetimes)
1395 I->eraseFromParent();
1396 return true;
1397}
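
The "shift-OR" strategy described at the top of instrumentStack boils down to placing the tag in the pointer's top byte. For userspace targets (a tag shift of 56, as used by AArch64 top-byte-ignore) the arithmetic is roughly this hedged sketch; the helper is illustrative:

    #include <cstdint>

    // OR an 8-bit tag into the top byte of an untagged address. The pass
    // performs the equivalent on IR values when building Replacement.
    uint64_t tagAddress(uint64_t UntaggedAddr, uint8_t Tag) {
      return UntaggedAddr | (uint64_t(Tag) << 56);
    }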
1398
1399bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1400 return (AI.getAllocatedType()->isSized() &&
1401 // FIXME: instrument dynamic allocas, too
1402 AI.isStaticAlloca() &&
11. Assuming the condition is true
1403 // alloca() may be called with 0 size; ignore it.
1404 getAllocaSizeInBytes(AI) > 0 &&
12. Calling 'getAllocaSizeInBytes'
1405 // We are only interested in allocas not promotable to registers.
1406 // Promotable allocas are common under -O0.
1407 !isAllocaPromotable(&AI) &&
1408 // inalloca allocas are not treated as static, and we don't want
1409 // dynamic alloca instrumentation for them as well.
1410 !AI.isUsedWithInAlloca() &&
1411 // swifterror allocas are register promoted by ISel
1412 !AI.isSwiftError()) &&
1413 // safe allocas are not interesting
1414 !(SSI && SSI->isSafe(AI));
1415}
1416
1417DenseMap<AllocaInst *, AllocaInst *> HWAddressSanitizer::padInterestingAllocas(
1418 const MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument) {
1419 DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
1420 for (auto &KV : AllocasToInstrument) {
1421 AllocaInst *AI = KV.first;
1422 uint64_t Size = getAllocaSizeInBytes(*AI);
1423 uint64_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1424 AI->setAlignment(
1425 Align(std::max(AI->getAlignment(), Mapping.getObjectAlignment())));
1426 if (Size != AlignedSize) {
1427 Type *AllocatedType = AI->getAllocatedType();
1428 if (AI->isArrayAllocation()) {
1429 uint64_t ArraySize =
1430 cast<ConstantInt>(AI->getArraySize())->getZExtValue();
1431 AllocatedType = ArrayType::get(AllocatedType, ArraySize);
1432 }
1433 Type *TypeWithPadding = StructType::get(
1434 AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
1435 auto *NewAI = new AllocaInst(
1436 TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
1437 NewAI->takeName(AI);
1438 NewAI->setAlignment(AI->getAlign());
1439 NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
1440 NewAI->setSwiftError(AI->isSwiftError());
1441 NewAI->copyMetadata(*AI);
1442 auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
1443 AI->replaceAllUsesWith(Bitcast);
1444 AllocaToPaddedAllocaMap[AI] = NewAI;
1445 }
1446 }
1447 return AllocaToPaddedAllocaMap;
1448}
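
The padding arithmetic above is a plain round-up to the tag granule (16 bytes with the default shadow scale of 4). A minimal sketch, with an illustrative helper name:

    #include <cstdint>

    // Round an alloca's size up to the granule; the difference becomes
    // the [N x i8] padding member of the replacement struct.
    uint64_t paddedAllocaSize(uint64_t Size, uint64_t Granule = 16) {
      return (Size + Granule - 1) & ~(Granule - 1);
    }
    // e.g. paddedAllocaSize(13) == 16, so a 13-byte alloca is rebuilt as
    // { T, [3 x i8] } with 16-byte alignment.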
1449
1450bool HWAddressSanitizer::sanitizeFunction(
1451 Function &F, llvm::function_ref<const DominatorTree &()> GetDT,
1452 llvm::function_ref<const PostDominatorTree &()> GetPDT) {
1453 if (&F == HwasanCtorFunction)
1. Assuming the condition is false
2. Taking false branch
1454 return false;
1455
1456 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
3. Assuming the condition is false
4. Taking false branch
1457 return false;
1458
1459 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
5. Loop condition is false. Exiting loop (the do/while(false) in the NDEBUG expansion of LLVM_DEBUG)
1460
1461 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1462 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1463 MapVector<AllocaInst *, AllocaInfo> AllocasToInstrument;
1464 SmallVector<Instruction *, 8> RetVec;
1465 SmallVector<Instruction *, 8> LandingPadVec;
1466 SmallVector<Instruction *, 4> UnrecognizedLifetimes;
1467 DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> AllocaDbgMap;
1468 for (auto &BB : F) {
1469 for (auto &Inst : BB) {
1470 if (InstrumentStack) {
6. Assuming field 'InstrumentStack' is true
7. Taking true branch
1471 if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
8. Assuming the object is a 'AllocaInst'
8.1. 'AI' is non-null
9. Taking true branch
1472 if (isInterestingAlloca(*AI))
10. Calling 'HWAddressSanitizer::isInterestingAlloca'
1473 AllocasToInstrument.insert({AI, {}});
1474 continue;
1475 }
1476 auto *II = dyn_cast<IntrinsicInst>(&Inst);
1477 if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
1478 II->getIntrinsicID() == Intrinsic::lifetime_end)) {
1479 AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
1480 if (!AI) {
1481 UnrecognizedLifetimes.push_back(&Inst);
1482 continue;
1483 }
1484 if (!isInterestingAlloca(*AI))
1485 continue;
1486 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1487 AllocasToInstrument[AI].LifetimeStart.push_back(II);
1488 else
1489 AllocasToInstrument[AI].LifetimeEnd.push_back(II);
1490 continue;
1491 }
1492 }
1493
1494 if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
1495 isa<CleanupReturnInst>(Inst))
1496 RetVec.push_back(&Inst);
1497
1498 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
1499 for (Value *V : DVI->location_ops()) {
1500 if (auto *Alloca = dyn_cast_or_null<AllocaInst>(V))
1501 if (!AllocaDbgMap.count(Alloca) ||
1502 AllocaDbgMap[Alloca].back() != DVI)
1503 AllocaDbgMap[Alloca].push_back(DVI);
1504 }
1505 }
1506
1507 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1508 LandingPadVec.push_back(&Inst);
1509
1510 getInterestingMemoryOperands(&Inst, OperandsToInstrument);
1511
1512 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1513 IntrinToInstrument.push_back(MI);
1514 }
1515 }
1516
1517 initializeCallbacks(*F.getParent());
1518
1519 bool Changed = false;
1520
1521 if (!LandingPadVec.empty())
1522 Changed |= instrumentLandingPads(LandingPadVec);
1523
1524 if (AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1525 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1526 // __hwasan_personality_thunk is a no-op for functions without an
1527 // instrumented stack, so we can drop it.
1528 F.setPersonalityFn(nullptr);
1529 Changed = true;
1530 }
1531
1532 if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1533 IntrinToInstrument.empty())
1534 return Changed;
1535
1536 assert(!ShadowBase);
1537
1538 Instruction *InsertPt = &*F.getEntryBlock().begin();
1539 IRBuilder<> EntryIRB(InsertPt);
1540 emitPrologue(EntryIRB,
1541 /*WithFrameRecord*/ ClRecordStackHistory &&
1542 Mapping.WithFrameRecord && !AllocasToInstrument.empty());
1543
1544 if (!AllocasToInstrument.empty()) {
1545 Value *StackTag =
1546 ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
1547 instrumentStack(AllocasToInstrument, UnrecognizedLifetimes, AllocaDbgMap,
1548 RetVec, StackTag, GetDT, GetPDT);
1549 }
1550 // Pad and align each of the allocas that we instrumented to stop small
1551 // uninteresting allocas from hiding in an instrumented alloca's padding and so
1552 // that we have enough space to store real tags for short granules.
1553 DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap =
1554 padInterestingAllocas(AllocasToInstrument);
1555
1556 if (!AllocaToPaddedAllocaMap.empty()) {
1557 for (auto &BB : F) {
1558 for (auto &Inst : BB) {
1559 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
1560 SmallDenseSet<Value *> LocationOps(DVI->location_ops().begin(),
1561 DVI->location_ops().end());
1562 for (Value *V : LocationOps) {
1563 if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
1564 if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
1565 DVI->replaceVariableLocationOp(V, NewAI);
1566 }
1567 }
1568 }
1569 }
1570 }
1571 for (auto &P : AllocaToPaddedAllocaMap)
1572 P.first->eraseFromParent();
1573 }
1574
1575 // If we split the entry block, move any allocas that were originally in the
1576 // entry block back into the entry block so that they aren't treated as
1577 // dynamic allocas.
1578 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1579 InsertPt = &*F.getEntryBlock().begin();
1580 for (auto II = EntryIRB.GetInsertBlock()->begin(),
1581 IE = EntryIRB.GetInsertBlock()->end();
1582 II != IE;) {
1583 Instruction *I = &*II++;
1584 if (auto *AI = dyn_cast<AllocaInst>(I))
1585 if (isa<ConstantInt>(AI->getArraySize()))
1586 I->moveBefore(InsertPt);
1587 }
1588 }
1589
1590 for (auto &Operand : OperandsToInstrument)
1591 instrumentMemAccess(Operand);
1592
1593 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1594 for (auto Inst : IntrinToInstrument)
1595 instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
1596 }
1597
1598 ShadowBase = nullptr;
1599 StackBaseTag = nullptr;
1600
1601 return true;
1602}
1603
1604void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1605 assert(!UsePageAliases);
1606 Constant *Initializer = GV->getInitializer();
1607 uint64_t SizeInBytes =
1608 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1609 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1610 if (SizeInBytes != NewSize) {
1611 // Pad the initializer out to the next multiple of 16 bytes and add the
1612 // required short granule tag.
1613 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1614 Init.back() = Tag;
1615 Constant *Padding = ConstantDataArray::get(*C, Init);
1616 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1617 }
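
For example (sizes and tag value illustrative): a 13-byte global padded to a 16-byte granule receives three padding bytes, the last of which carries the tag, mirroring the two lines above:

    #include <cstdint>
    #include <vector>

    // Padding for a 13-byte global with tag 0x2a.
    std::vector<uint8_t> makeShortGranulePadding() {
      std::vector<uint8_t> Init(16 - 13, 0); // three zero bytes
      Init.back() = 0x2a;                    // tag in the granule's last byte
      return Init;                           // {0x00, 0x00, 0x2a}
    }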
1618
1619 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1620 GlobalValue::ExternalLinkage, Initializer,
1621 GV->getName() + ".hwasan");
1622 NewGV->copyAttributesFrom(GV);
1623 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1624 NewGV->copyMetadata(GV, 0);
1625 NewGV->setAlignment(
1626 MaybeAlign(std::max(GV->getAlignment(), Mapping.getObjectAlignment())));
1627
1628 // It is invalid to ICF two globals that have different tags. In the case
1629 // where the size of the global is a multiple of the tag granularity the
1630 // contents of the globals may be the same but the tags (i.e. symbol values)
1631 // may be different, and the symbols are not considered during ICF. In the
1632 // case where the size is not a multiple of the granularity, the short granule
1633 // tags would discriminate two globals with different tags, but there would
1634 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1635 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1636 // granule tag in the last byte.
1637 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1638
1639 // Descriptor format (assuming little-endian):
1640 // bytes 0-3: relative address of global
1641 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1642 // it isn't, we create multiple descriptors)
1643 // byte 7: tag
1644 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1645 const uint64_t MaxDescriptorSize = 0xfffff0;
1646 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1647 DescriptorPos += MaxDescriptorSize) {
1648 auto *Descriptor =
1649 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1650 nullptr, GV->getName() + ".hwasan.descriptor");
1651 auto *GVRelPtr = ConstantExpr::getTrunc(
1652 ConstantExpr::getAdd(
1653 ConstantExpr::getSub(
1654 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1655 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1656 ConstantInt::get(Int64Ty, DescriptorPos)),
1657 Int32Ty);
1658 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1659 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1660 Descriptor->setComdat(NewGV->getComdat());
1661 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1662 Descriptor->setSection("hwasan_globals");
1663 Descriptor->setMetadata(LLVMContext::MD_associated,
1664 MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1665 appendToCompilerUsed(M, Descriptor);
1666 }
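
Laid out in memory, each descriptor emitted by the loop above is, in effect (a sketch; the struct and function names are illustrative):

    #include <cstdint>

    // One 8-byte entry in the "hwasan_globals" section.
    struct GlobalDescriptor {
      int32_t GVRelAddr;   // bytes 0-3: global's address, descriptor-relative
      uint32_t SizeAndTag; // bytes 4-6: size (<= 0xfffff0); byte 7: tag
    };

    uint32_t packSizeAndTag(uint32_t Size, uint8_t Tag) {
      return Size | (uint32_t(Tag) << 24);
    }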
1667
1668 Constant *Aliasee = ConstantExpr::getIntToPtr(
1669 ConstantExpr::getAdd(
1670 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1671 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1672 GV->getType());
1673 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1674 GV->getLinkage(), "", Aliasee, &M);
1675 Alias->setVisibility(GV->getVisibility());
1676 Alias->takeName(GV);
1677 GV->replaceAllUsesWith(Alias);
1678 GV->eraseFromParent();
1679}
1680
1681static DenseSet<GlobalVariable *> getExcludedGlobals(Module &M) {
1682 NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
1683 if (!Globals)
1684 return DenseSet<GlobalVariable *>();
1685 DenseSet<GlobalVariable *> Excluded(Globals->getNumOperands());
1686 for (auto MDN : Globals->operands()) {
1687 // Metadata node contains the global and the fields of "Entry".
1688 assert(MDN->getNumOperands() == 5);
1689 auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
1690 // The optimizer may optimize away a global entirely.
1691 if (!V)
1692 continue;
1693 auto *StrippedV = V->stripPointerCasts();
1694 auto *GV = dyn_cast<GlobalVariable>(StrippedV);
1695 if (!GV)
1696 continue;
1697 ConstantInt *IsExcluded = mdconst::extract<ConstantInt>(MDN->getOperand(4));
1698 if (IsExcluded->isOne())
1699 Excluded.insert(GV);
1700 }
1701 return Excluded;
1702}
1703
1704void HWAddressSanitizer::instrumentGlobals() {
1705 std::vector<GlobalVariable *> Globals;
1706 auto ExcludedGlobals = getExcludedGlobals(M);
1707 for (GlobalVariable &GV : M.globals()) {
1708 if (ExcludedGlobals.count(&GV))
1709 continue;
1710
1711 if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
1712 GV.isThreadLocal())
1713 continue;
1714
1715 // Common symbols can't have aliases point to them, so they can't be tagged.
1716 if (GV.hasCommonLinkage())
1717 continue;
1718
1719 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1720 // which would be broken both by adding tags and potentially by the extra
1721 // padding/alignment that we insert.
1722 if (GV.hasSection())
1723 continue;
1724
1725 Globals.push_back(&GV);
1726 }
1727
1728 MD5 Hasher;
1729 Hasher.update(M.getSourceFileName());
1730 MD5::MD5Result Hash;
1731 Hasher.final(Hash);
1732 uint8_t Tag = Hash[0] & TagMaskByte;
1733
1734 for (GlobalVariable *GV : Globals) {
1735 // Skip tag 0 in order to avoid collisions with untagged memory.
1736 if (Tag == 0)
1737 Tag = 1;
1738 instrumentGlobal(GV, Tag++);
1739 }
1740}
1741
1742void HWAddressSanitizer::instrumentPersonalityFunctions() {
1743 // We need to untag stack frames as we unwind past them. That is the job of
1744 // the personality function wrapper, which either wraps an existing
1745 // personality function or acts as a personality function on its own. Each
1746 // function that has a personality function or that can be unwound past has
1747 // its personality function changed to a thunk that calls the personality
1748 // function wrapper in the runtime.
1749 MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1750 for (Function &F : M) {
1751 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1752 continue;
1753
1754 if (F.hasPersonalityFn()) {
1755 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1756 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1757 PersonalityFns[nullptr].push_back(&F);
1758 }
1759 }
1760
1761 if (PersonalityFns.empty())
1762 return;
1763
1764 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1765 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
1766 Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
1767 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1768 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1769
1770 for (auto &P : PersonalityFns) {
1771 std::string ThunkName = kHwasanPersonalityThunkName;
1772 if (P.first)
1773 ThunkName += ("." + P.first->getName()).str();
1774 FunctionType *ThunkFnTy = FunctionType::get(
1775 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
1776 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1777 cast<GlobalValue>(P.first)->hasLocalLinkage());
1778 auto *ThunkFn = Function::Create(ThunkFnTy,
1779 IsLocal ? GlobalValue::InternalLinkage
1780 : GlobalValue::LinkOnceODRLinkage,
1781 ThunkName, &M);
1782 if (!IsLocal) {
1783 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1784 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1785 }
1786
1787 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1788 IRBuilder<> IRB(BB);
1789 CallInst *WrapperCall = IRB.CreateCall(
1790 HwasanPersonalityWrapper,
1791 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1792 ThunkFn->getArg(3), ThunkFn->getArg(4),
1793 P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
1794 : Constant::getNullValue(Int8PtrTy),
1795 IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
1796 IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
1797 WrapperCall->setTailCall();
1798 IRB.CreateRet(WrapperCall);
1799
1800 for (Function *F : P.second)
1801 F->setPersonalityFn(ThunkFn);
1802 }
1803}
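
In C terms, a generated thunk behaves roughly like the sketch below. This is illustrative only: the real thunk is emitted directly as IR, the wrapper's prototype is inferred from the getOrInsertFunction call above, and the null RealPersonality corresponds to the "no personality of its own" case:

    #include <cstdint>
    #include <unwind.h>

    extern "C" int __hwasan_personality_wrapper(
        int Version, int Actions, uint64_t ExceptionClass, void *ExceptionObj,
        void *Context, void *RealPersonality, void *GetGR, void *GetCFA);

    extern "C" int hwasan_personality_thunk(int Version, int Actions,
                                            uint64_t ExceptionClass,
                                            void *ExceptionObj, void *Context) {
      // Tail-calls the runtime wrapper, handing it the original
      // personality (null here) plus unwinder accessors.
      return __hwasan_personality_wrapper(
          Version, Actions, ExceptionClass, ExceptionObj, Context,
          /*RealPersonality=*/nullptr, (void *)&_Unwind_GetGR,
          (void *)&_Unwind_GetCFA);
    }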
1804
1805void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1806 bool InstrumentWithCalls) {
1807 Scale = kDefaultShadowScale;
1808 if (TargetTriple.isOSFuchsia()) {
1809 // Fuchsia is always PIE, which means that the beginning of the address
1810 // space is always available.
1811 InGlobal = false;
1812 InTls = false;
1813 Offset = 0;
1814 WithFrameRecord = true;
1815 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1816 InGlobal = false;
1817 InTls = false;
1818 Offset = ClMappingOffset;
1819 WithFrameRecord = false;
1820 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1821 InGlobal = false;
1822 InTls = false;
1823 Offset = 0;
1824 WithFrameRecord = false;
1825 } else if (ClWithIfunc) {
1826 InGlobal = true;
1827 InTls = false;
1828 Offset = kDynamicShadowSentinel;
1829 WithFrameRecord = false;
1830 } else if (ClWithTls) {
1831 InGlobal = false;
1832 InTls = true;
1833 Offset = kDynamicShadowSentinel;
1834 WithFrameRecord = true;
1835 } else {
1836 InGlobal = false;
1837 InTls = false;
1838 Offset = kDynamicShadowSentinel;
1839 WithFrameRecord = false;
1840 }
1841}
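
Summarizing the shadow-mapping selection above (first matching condition wins):

    Condition (checked in order)            InGlobal  InTls  Offset                  WithFrameRecord
    Fuchsia target                          false     false  0                       true
    ClMappingOffset given                   false     false  ClMappingOffset         false
    ClEnableKhwasan or InstrumentWithCalls  false     false  0                       false
    ClWithIfunc                             true      false  kDynamicShadowSentinel  false
    ClWithTls                               false     true   kDynamicShadowSentinel  true
    otherwise                               false     false  kDynamicShadowSentinel  false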