LLVM 14.0.0git
HWAddressSanitizer.cpp
1 //===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of HWAddressSanitizer, an address sanity checker
11 /// based on tagged addressing.
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15 #include "llvm/ADT/MapVector.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/Analysis/CFG.h"
21 #include "llvm/Analysis/PostDominators.h"
22 #include "llvm/Analysis/StackSafetyAnalysis.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/BinaryFormat/ELF.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/BasicBlock.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/DebugInfoMetadata.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/InlineAsm.h"
36 #include "llvm/IR/InstVisitor.h"
37 #include "llvm/IR/Instruction.h"
38 #include "llvm/IR/Instructions.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/IR/LLVMContext.h"
42 #include "llvm/IR/MDBuilder.h"
43 #include "llvm/IR/Module.h"
44 #include "llvm/IR/Type.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/InitializePasses.h"
47 #include "llvm/Pass.h"
48 #include "llvm/PassRegistry.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
55 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
56 #include "llvm/Transforms/Utils/ModuleUtils.h"
57 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
58 #include <sstream>
59 
60 using namespace llvm;
61 
62 #define DEBUG_TYPE "hwasan"
63 
64 const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
65 const char kHwasanNoteName[] = "hwasan.note";
66 const char kHwasanInitName[] = "__hwasan_init";
67 const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
68 
69 const char kHwasanShadowMemoryDynamicAddress[] =
70  "__hwasan_shadow_memory_dynamic_address";
71 
72 // Access sizes are powers of two: 1, 2, 4, 8, 16.
73 static const size_t kNumberOfAccessSizes = 5;
74 
75 static const size_t kDefaultShadowScale = 4;
76 static const uint64_t kDynamicShadowSentinel =
77  std::numeric_limits<uint64_t>::max();
78 
79 static const unsigned kShadowBaseAlignment = 32;
80 
81 static cl::opt<std::string>
82  ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
83  cl::desc("Prefix for memory access callbacks"),
84  cl::Hidden, cl::init("__hwasan_"));
85 
86 static cl::opt<bool> ClInstrumentWithCalls(
87  "hwasan-instrument-with-calls",
88  cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
89  cl::init(false));
90 
91 static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
92  cl::desc("instrument read instructions"),
93  cl::Hidden, cl::init(true));
94 
95 static cl::opt<bool>
96  ClInstrumentWrites("hwasan-instrument-writes",
97  cl::desc("instrument write instructions"), cl::Hidden,
98  cl::init(true));
99 
100 static cl::opt<bool> ClInstrumentAtomics(
101  "hwasan-instrument-atomics",
102  cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
103  cl::init(true));
104 
105 static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
106  cl::desc("instrument byval arguments"),
107  cl::Hidden, cl::init(true));
108 
109 static cl::opt<bool>
110  ClRecover("hwasan-recover",
111  cl::desc("Enable recovery mode (continue-after-error)."),
112  cl::Hidden, cl::init(false));
113 
114 static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
115  cl::desc("instrument stack (allocas)"),
116  cl::Hidden, cl::init(true));
117 
118 static cl::opt<bool>
119  ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
120  cl::Hidden, cl::desc("Use Stack Safety analysis results"),
121  cl::Optional);
122 
123 static cl::opt<size_t> ClMaxLifetimes(
124  "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
125  cl::ReallyHidden,
126  cl::desc("How many lifetime ends to handle for a single alloca."),
127  cl::Optional);
128 
129 static cl::opt<bool>
130  ClUseAfterScope("hwasan-use-after-scope",
131  cl::desc("detect use after scope within function"),
132  cl::Hidden, cl::init(false));
133 
134 static cl::opt<bool> ClUARRetagToZero(
135  "hwasan-uar-retag-to-zero",
136  cl::desc("Clear alloca tags before returning from the function to allow "
137  "non-instrumented and instrumented function calls mix. When set "
138  "to false, allocas are retagged before returning from the "
139  "function to detect use after return."),
140  cl::Hidden, cl::init(true));
141 
142 static cl::opt<bool> ClGenerateTagsWithCalls(
143  "hwasan-generate-tags-with-calls",
144  cl::desc("generate new tags with runtime library calls"), cl::Hidden,
145  cl::init(false));
146 
147 static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
148  cl::Hidden, cl::init(false), cl::ZeroOrMore);
149 
150 static cl::opt<int> ClMatchAllTag(
151  "hwasan-match-all-tag",
152  cl::desc("don't report bad accesses via pointers with this tag"),
153  cl::Hidden, cl::init(-1));
154 
155 static cl::opt<bool>
156  ClEnableKhwasan("hwasan-kernel",
157  cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
158  cl::Hidden, cl::init(false));
159 
160 // These flags allow changing the shadow mapping and control how shadow memory
161 // is accessed. The shadow mapping looks like:
162 // Shadow = (Mem >> scale) + offset
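To make the mapping concrete, here is a minimal standalone sketch (illustration only, not part of the pass) that computes the shadow byte address for an already-untagged pointer, assuming the default scale of 4, i.e. one shadow byte per 16-byte granule:

#include <cstdint>

// Shadow = (Mem >> Scale) + Base: every (1 << Scale) bytes of application
// memory share one shadow byte that holds the expected tag.
static uint8_t *shadowForAddress(uintptr_t UntaggedAddr, uint8_t *ShadowBase,
                                 unsigned Scale = 4) {
  return ShadowBase + (UntaggedAddr >> Scale);
}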
163 
164 static cl::opt<uint64_t>
165  ClMappingOffset("hwasan-mapping-offset",
166  cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
167  cl::Hidden, cl::init(0));
168 
169 static cl::opt<bool>
170  ClWithIfunc("hwasan-with-ifunc",
171  cl::desc("Access dynamic shadow through an ifunc global on "
172  "platforms that support this"),
173  cl::Hidden, cl::init(false));
174 
175 static cl::opt<bool> ClWithTls(
176  "hwasan-with-tls",
177  cl::desc("Access dynamic shadow through a thread-local pointer on "
178  "platforms that support this"),
179  cl::Hidden, cl::init(true));
180 
181 static cl::opt<bool>
182  ClRecordStackHistory("hwasan-record-stack-history",
183  cl::desc("Record stack frames with tagged allocations "
184  "in a thread-local ring buffer"),
185  cl::Hidden, cl::init(true));
186 static cl::opt<bool>
187  ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
188  cl::desc("instrument memory intrinsics"),
189  cl::Hidden, cl::init(true));
190 
191 static cl::opt<bool>
192  ClInstrumentLandingPads("hwasan-instrument-landing-pads",
193  cl::desc("instrument landing pads"), cl::Hidden,
194  cl::init(false), cl::ZeroOrMore);
195 
196 static cl::opt<bool> ClUseShortGranules(
197  "hwasan-use-short-granules",
198  cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
199  cl::init(false), cl::ZeroOrMore);
200 
201 static cl::opt<bool> ClInstrumentPersonalityFunctions(
202  "hwasan-instrument-personality-functions",
203  cl::desc("instrument personality functions"), cl::Hidden, cl::init(false),
204  cl::ZeroOrMore);
205 
206 static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
207  cl::desc("inline all checks"),
208  cl::Hidden, cl::init(false));
209 
210 // Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
211 static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
212  cl::desc("Use page aliasing in HWASan"),
213  cl::Hidden, cl::init(false));
214 
215 namespace {
216 
217 bool shouldUsePageAliases(const Triple &TargetTriple) {
218  return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
219 }
220 
221 bool shouldInstrumentStack(const Triple &TargetTriple) {
222  return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
223 }
224 
225 bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
226  return ClInstrumentWithCalls || TargetTriple.getArch() == Triple::x86_64;
227 }
228 
229 bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
230  return ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
231  : !DisableOptimization;
232 }
233 
234 bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
235  bool DisableOptimization) {
236  return shouldInstrumentStack(TargetTriple) &&
237  mightUseStackSafetyAnalysis(DisableOptimization);
238 }
239 
240 bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
241  return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
242 }
243 
244 /// An instrumentation pass implementing detection of addressability bugs
245 /// using tagged pointers.
246 class HWAddressSanitizer {
247 private:
248  struct AllocaInfo {
249  AllocaInst *AI;
250  SmallVector<IntrinsicInst *, 2> LifetimeStart;
251  SmallVector<IntrinsicInst *, 2> LifetimeEnd;
252  };
253 
254 public:
255  HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
256  const StackSafetyGlobalInfo *SSI)
257  : M(M), SSI(SSI) {
258  this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
259  this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
260  ? ClEnableKhwasan
261  : CompileKernel;
262 
263  initializeModule();
264  }
265 
266  void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
267 
268  DenseMap<AllocaInst *, AllocaInst *> padInterestingAllocas(
269  const MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument);
270  bool sanitizeFunction(Function &F,
271  llvm::function_ref<const DominatorTree &()> GetDT,
272  llvm::function_ref<const PostDominatorTree &()> GetPDT);
273  void initializeModule();
274  void createHwasanCtorComdat();
275 
276  void initializeCallbacks(Module &M);
277 
278  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
279 
280  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
281  Value *getShadowNonTls(IRBuilder<> &IRB);
282 
283  void untagPointerOperand(Instruction *I, Value *Addr);
284  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
285 
286  int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
287  void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
288  unsigned AccessSizeIndex,
289  Instruction *InsertBefore);
290  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
291  unsigned AccessSizeIndex,
292  Instruction *InsertBefore);
293  bool ignoreMemIntrinsic(MemIntrinsic *MI);
294  void instrumentMemIntrinsic(MemIntrinsic *MI);
295  bool instrumentMemAccess(InterestingMemoryOperand &O);
296  bool ignoreAccess(Instruction *Inst, Value *Ptr);
297  void getInterestingMemoryOperands(
298  Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
299 
300  bool isInterestingAlloca(const AllocaInst &AI);
301  void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
302  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
303  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
304  static bool isStandardLifetime(const AllocaInfo &AllocaInfo,
305  const DominatorTree &DT);
306  bool instrumentStack(
307  MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument,
308  SmallVector<Instruction *, 4> &UnrecognizedLifetimes,
309  DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
310  SmallVectorImpl<Instruction *> &RetVec, Value *StackTag,
311  llvm::function_ref<const DominatorTree &()> GetDT,
312  llvm::function_ref<const PostDominatorTree &()> GetPDT);
313  Value *readRegister(IRBuilder<> &IRB, StringRef Name);
314  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
315  Value *getNextTagWithCall(IRBuilder<> &IRB);
316  Value *getStackBaseTag(IRBuilder<> &IRB);
317  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
318  unsigned AllocaNo);
319  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
320 
321  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
322  Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
323  unsigned retagMask(unsigned AllocaNo);
324 
325  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
326 
327  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
328  void instrumentGlobals();
329 
330  void instrumentPersonalityFunctions();
331 
332 private:
333  LLVMContext *C;
334  Module &M;
335  const StackSafetyGlobalInfo *SSI;
336  Triple TargetTriple;
337  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
338  FunctionCallee HWAsanHandleVfork;
339 
340  /// This struct defines the shadow mapping using the rule:
341  /// shadow = (mem >> Scale) + Offset.
342  /// If InGlobal is true, then
343  /// extern char __hwasan_shadow[];
344  /// shadow = (mem >> Scale) + &__hwasan_shadow
345  /// If InTls is true, then
346  /// extern char *__hwasan_tls;
347  /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
348  ///
349  /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
350  /// ring buffer for storing stack allocations on targets that support it.
351  struct ShadowMapping {
352  int Scale;
353  uint64_t Offset;
354  bool InGlobal;
355  bool InTls;
356  bool WithFrameRecord;
357 
358  void init(Triple &TargetTriple, bool InstrumentWithCalls);
359  uint64_t getObjectAlignment() const { return 1ULL << Scale; }
360  };
361 
362  ShadowMapping Mapping;
363 
364  Type *VoidTy = Type::getVoidTy(M.getContext());
365  Type *IntptrTy;
366  Type *Int8PtrTy;
367  Type *Int8Ty;
368  Type *Int32Ty;
369  Type *Int64Ty = Type::getInt64Ty(M.getContext());
370 
371  bool CompileKernel;
372  bool Recover;
373  bool OutlinedChecks;
374  bool UseShortGranules;
375  bool InstrumentLandingPads;
376  bool InstrumentWithCalls;
377  bool InstrumentStack;
378  bool DetectUseAfterScope;
379  bool UsePageAliases;
380 
381  bool HasMatchAllTag = false;
382  uint8_t MatchAllTag = 0;
383 
384  unsigned PointerTagShift;
385  uint64_t TagMaskByte;
386 
387  Function *HwasanCtorFunction;
388 
389  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
390  FunctionCallee HwasanMemoryAccessCallbackSized[2];
391 
392  FunctionCallee HwasanTagMemoryFunc;
393  FunctionCallee HwasanGenerateTagFunc;
394 
395  Constant *ShadowGlobal;
396 
397  Value *ShadowBase = nullptr;
398  Value *StackBaseTag = nullptr;
399  GlobalValue *ThreadPtrGlobal = nullptr;
400 };
401 
402 class HWAddressSanitizerLegacyPass : public FunctionPass {
403 public:
404  // Pass identification, replacement for typeid.
405  static char ID;
406 
407  explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
408  bool Recover = false,
409  bool DisableOptimization = false)
410  : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
411  DisableOptimization(DisableOptimization) {
412  initializeHWAddressSanitizerLegacyPassPass(
413  *PassRegistry::getPassRegistry());
414  }
415 
416  StringRef getPassName() const override { return "HWAddressSanitizer"; }
417 
418  bool doInitialization(Module &M) override {
419  HWASan = std::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover,
420  /*SSI=*/nullptr);
421  return true;
422  }
423 
424  bool runOnFunction(Function &F) override {
425  auto TargetTriple = Triple(F.getParent()->getTargetTriple());
426  if (shouldUseStackSafetyAnalysis(TargetTriple, DisableOptimization)) {
427  // We cannot call getAnalysis in doInitialization; that would cause a
428  // crash because the required analyses are not initialized yet.
429  HWASan->setSSI(
430  &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult());
431  }
432  return HWASan->sanitizeFunction(
433  F,
434  [&]() -> const DominatorTree & {
435  return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
436  },
437  [&]() -> const PostDominatorTree & {
438  return getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
439  });
440  }
441 
442  bool doFinalization(Module &M) override {
443  HWASan.reset();
444  return false;
445  }
446 
447  void getAnalysisUsage(AnalysisUsage &AU) const override {
448  // This is an over-estimation: even if we are building for an architecture
449  // that doesn't allow stack tagging, we will still load the analysis.
450  // This is done so we don't need to plumb TargetTriple all the way to
451  // here.
452  if (mightUseStackSafetyAnalysis(DisableOptimization))
453  AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
454  AU.addRequired<DominatorTreeWrapperPass>();
455  AU.addRequired<PostDominatorTreeWrapperPass>();
456  }
457 
458 private:
459  std::unique_ptr<HWAddressSanitizer> HWASan;
460  bool CompileKernel;
461  bool Recover;
462  bool DisableOptimization;
463 };
464 
465 } // end anonymous namespace
466 
467 char HWAddressSanitizerLegacyPass::ID = 0;
468 
469 INITIALIZE_PASS_BEGIN(
470  HWAddressSanitizerLegacyPass, "hwasan",
471  "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
472  false)
473 INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
474 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
475 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
476 INITIALIZE_PASS_END(
477  HWAddressSanitizerLegacyPass, "hwasan",
478  "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
479  false)
480 
481 FunctionPass *
482 llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel, bool Recover,
483  bool DisableOptimization) {
484  assert(!CompileKernel || Recover);
485  return new HWAddressSanitizerLegacyPass(CompileKernel, Recover,
486  DisableOptimization);
487 }
488 
489 PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
490  ModuleAnalysisManager &MAM) {
491  const StackSafetyGlobalInfo *SSI = nullptr;
492  auto TargetTriple = llvm::Triple(M.getTargetTriple());
493  if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
494  SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
495 
496  HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
497  bool Modified = false;
498  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
499  for (Function &F : M) {
500  Modified |= HWASan.sanitizeFunction(
501  F,
502  [&]() -> const DominatorTree & {
503  return FAM.getResult<DominatorTreeAnalysis>(F);
504  },
505  [&]() -> const PostDominatorTree & {
506  return FAM.getResult<PostDominatorTreeAnalysis>(F);
507  });
508  }
509  if (Modified)
510  return PreservedAnalyses::none();
511  return PreservedAnalyses::all();
512 }
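For context, the usual way to run this pass is through the new pass manager, e.g. "opt -passes=hwasan". A minimal sketch of scheduling it programmatically; the option fields mirror the Options members referenced in run() and printPipeline() above, and the surrounding pass-manager setup is assumed:

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"

// Illustration only: add HWASan to a module pass pipeline.
static void addHWAsanPass(llvm::ModulePassManager &MPM) {
  llvm::HWAddressSanitizerOptions Opts;
  Opts.CompileKernel = false;
  Opts.Recover = false; // abort on the first report
  MPM.addPass(llvm::HWAddressSanitizerPass(Opts));
}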
513 void HWAddressSanitizerPass::printPipeline(
514  raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
515  static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
516  OS, MapClassName2PassName);
517  OS << "<";
518  if (Options.CompileKernel)
519  OS << "kernel;";
520  if (Options.Recover)
521  OS << "recover";
522  OS << ">";
523 }
524 
525 void HWAddressSanitizer::createHwasanCtorComdat() {
526  std::tie(HwasanCtorFunction, std::ignore) =
527  getOrCreateSanitizerCtorAndInitFunctions(
528  M, kHwasanModuleCtorName, kHwasanInitName,
529  /*InitArgTypes=*/{},
530  /*InitArgs=*/{},
531  // This callback is invoked when the functions are created the first
532  // time. Hook them into the global ctors list in that case:
533  [&](Function *Ctor, FunctionCallee) {
534  Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
535  Ctor->setComdat(CtorComdat);
536  appendToGlobalCtors(M, Ctor, 0, Ctor);
537  });
538 
539  // Create a note that contains pointers to the list of global
540  // descriptors. Adding a note to the output file will cause the linker to
541  // create a PT_NOTE program header pointing to the note that we can use to
542  // find the descriptor list starting from the program headers. A function
543  // provided by the runtime initializes the shadow memory for the globals by
544  // accessing the descriptor list via the note. The dynamic loader needs to
545  // call this function whenever a library is loaded.
546  //
547  // We use a note for this instead of the more conventional approach of
548  // having a global constructor pass a descriptor list pointer to the
549  // runtime because of an initialization-order problem. With
550  // constructors we can encounter the following problematic scenario:
551  //
552  // 1) library A depends on library B and also interposes one of B's symbols
553  // 2) B's constructors are called before A's (as required for correctness)
554  // 3) during construction, B accesses one of its "own" globals (actually
555  // interposed by A) and triggers a HWASAN failure due to the initialization
556  // for A not having happened yet
557  //
558  // Even without interposition it is possible to run into similar situations in
559  // cases where two libraries mutually depend on each other.
560  //
561  // We only need one note per binary, so put everything for the note in a
562  // comdat. This needs to be a comdat with an .init_array section to prevent
563  // newer versions of lld from discarding the note.
564  //
565  // Create the note even if we aren't instrumenting globals. This ensures that
566  // binaries linked from object files with both instrumented and
567  // non-instrumented globals will end up with a note, even if a comdat from an
568  // object file with non-instrumented globals is selected. The note is harmless
569  // if the runtime doesn't support it, since it will just be ignored.
570  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
571 
572  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
573  auto Start =
574  new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
575  nullptr, "__start_hwasan_globals");
576  Start->setVisibility(GlobalValue::HiddenVisibility);
577  Start->setDSOLocal(true);
578  auto Stop =
579  new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
580  nullptr, "__stop_hwasan_globals");
581  Stop->setVisibility(GlobalValue::HiddenVisibility);
582  Stop->setDSOLocal(true);
583 
584  // Null-terminated so actually 8 bytes, which are required in order to align
585  // the note properly.
586  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
587 
588  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
589  Int32Ty, Int32Ty);
590  auto *Note =
591  new GlobalVariable(M, NoteTy, /*isConstant=*/true,
592  GlobalValue::PrivateLinkage, nullptr);
593  Note->setSection(".note.hwasan.globals");
594  Note->setComdat(NoteComdat);
595  Note->setAlignment(Align(4));
596  Note->setDSOLocal(true);
597 
598  // The pointers in the note need to be relative so that the note ends up being
599  // placed in rodata, which is the standard location for notes.
600  auto CreateRelPtr = [&](Constant *Ptr) {
601  return ConstantExpr::getTrunc(
602  ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
603  ConstantExpr::getPtrToInt(Note, Int64Ty)),
604  Int32Ty);
605  };
606  Note->setInitializer(ConstantStruct::getAnon(
607  {ConstantInt::get(Int32Ty, 8), // n_namesz
608  ConstantInt::get(Int32Ty, 8), // n_descsz
609  ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
610  Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
611  appendToCompilerUsed(M, Note);
612 
613  // Create a zero-length global in hwasan_globals so that the linker will
614  // always create start and stop symbols.
615  auto Dummy = new GlobalVariable(
616  M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
617  Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
618  Dummy->setSection("hwasan_globals");
619  Dummy->setComdat(NoteComdat);
620  Dummy->setMetadata(LLVMContext::MD_associated,
621  MDNode::get(*C, ValueAsMetadata::get(Note)));
622  appendToCompilerUsed(M, Dummy);
623 }
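The emitted note follows the conventional ELF note layout. A rough C view of what ends up in .note.hwasan.globals (a sketch that mirrors the initializer above; the two trailing fields are 32-bit offsets relative to the note itself):

#include <cstdint>

struct HwasanGlobalsNote {
  uint32_t n_namesz;  // 8: "LLVM" plus NUL padding
  uint32_t n_descsz;  // 8: the two 32-bit self-relative pointers below
  uint32_t n_type;    // ELF::NT_LLVM_HWASAN_GLOBALS
  char     name[8];   // "LLVM\0\0\0\0"
  int32_t  start_rel; // __start_hwasan_globals minus the note's address
  int32_t  stop_rel;  // __stop_hwasan_globals minus the note's address
};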
624 
625 /// Module-level initialization.
626 ///
627 /// inserts a call to __hwasan_init to the module's constructor list.
628 void HWAddressSanitizer::initializeModule() {
629  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
630  auto &DL = M.getDataLayout();
631 
632  TargetTriple = Triple(M.getTargetTriple());
633 
634  // x86_64 currently has two modes:
635  // - Intel LAM (default)
636  // - pointer aliasing (heap only)
637  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
638  UsePageAliases = shouldUsePageAliases(TargetTriple);
639  InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
640  InstrumentStack = shouldInstrumentStack(TargetTriple);
641  DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
642  PointerTagShift = IsX86_64 ? 57 : 56;
643  TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
644 
645  Mapping.init(TargetTriple, InstrumentWithCalls);
646 
647  C = &(M.getContext());
648  IRBuilder<> IRB(*C);
649  IntptrTy = IRB.getIntPtrTy(DL);
650  Int8PtrTy = IRB.getInt8PtrTy();
651  Int8Ty = IRB.getInt8Ty();
652  Int32Ty = IRB.getInt32Ty();
653 
654  HwasanCtorFunction = nullptr;
655 
656  // Older versions of Android do not have the required runtime support for
657  // short granules, globals, or personality function instrumentation. On other
658  // platforms we currently require using the latest version of the runtime.
659  bool NewRuntime =
660  !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
661 
662  UseShortGranules =
663  ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
664  OutlinedChecks =
665  TargetTriple.isAArch64() && TargetTriple.isOSBinFormatELF() &&
666  (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
667 
668  if (ClMatchAllTag.getNumOccurrences()) {
669  if (ClMatchAllTag != -1) {
670  HasMatchAllTag = true;
671  MatchAllTag = ClMatchAllTag & 0xFF;
672  }
673  } else if (CompileKernel) {
674  HasMatchAllTag = true;
675  MatchAllTag = 0xFF;
676  }
677 
678  // If we don't have personality function support, fall back to landing pads.
679  InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
680  ? ClInstrumentLandingPads
681  : !NewRuntime;
682 
683  if (!CompileKernel) {
684  createHwasanCtorComdat();
685  bool InstrumentGlobals =
686  ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
687 
688  if (InstrumentGlobals && !UsePageAliases)
689  instrumentGlobals();
690 
691  bool InstrumentPersonalityFunctions =
692  ClInstrumentPersonalityFunctions.getNumOccurrences()
693  ? ClInstrumentPersonalityFunctions
694  : NewRuntime;
695  if (InstrumentPersonalityFunctions)
696  instrumentPersonalityFunctions();
697  }
698 
699  if (!TargetTriple.isAndroid()) {
700  Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
701  auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
702  GlobalValue::ExternalLinkage, nullptr,
703  "__hwasan_tls", nullptr,
704  GlobalVariable::InitialExecTLSModel);
705  appendToCompilerUsed(M, GV);
706  return GV;
707  });
708  ThreadPtrGlobal = cast<GlobalVariable>(C);
709  }
710 }
711 
712 void HWAddressSanitizer::initializeCallbacks(Module &M) {
713  IRBuilder<> IRB(*C);
714  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
715  const std::string TypeStr = AccessIsWrite ? "store" : "load";
716  const std::string EndingStr = Recover ? "_noabort" : "";
717 
718  HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
719  ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
720  FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));
721 
722  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
723  AccessSizeIndex++) {
724  HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
725  M.getOrInsertFunction(
726  ClMemoryAccessCallbackPrefix + TypeStr +
727  itostr(1ULL << AccessSizeIndex) + EndingStr,
728  FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
729  }
730  }
731 
732  HwasanTagMemoryFunc = M.getOrInsertFunction(
733  "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
734  HwasanGenerateTagFunc =
735  M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
736 
737  ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
738  ArrayType::get(IRB.getInt8Ty(), 0));
739 
740  const std::string MemIntrinCallbackPrefix =
741  CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
742  HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
743  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
744  IRB.getInt8PtrTy(), IntptrTy);
745  HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
746  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
747  IRB.getInt8PtrTy(), IntptrTy);
748  HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
749  IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
750  IRB.getInt32Ty(), IntptrTy);
751 
752  HWAsanHandleVfork =
753  M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
754 }
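As an illustration of the naming scheme built above: with the default "__hwasan_" prefix the requested callbacks are __hwasan_load1 through __hwasan_load16 and __hwasan_store1 through __hwasan_store16, plus the sized variants __hwasan_loadN/__hwasan_storeN, all with a "_noabort" suffix in recover mode. A small sketch that reproduces the string construction (assumptions: only the default prefix is shown):

#include <string>

// Illustration only: mirrors the concatenation in initializeCallbacks.
static std::string accessCallbackName(bool IsWrite, unsigned AccessSizeIndex,
                                      bool Recover,
                                      const std::string &Prefix = "__hwasan_") {
  return Prefix + (IsWrite ? "store" : "load") +
         std::to_string(1ULL << AccessSizeIndex) + (Recover ? "_noabort" : "");
}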
755 
756 Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
757  // An empty inline asm with input reg == output reg.
758  // An opaque no-op cast, basically.
759  // This prevents code bloat as a result of rematerializing trivial definitions
760  // such as constants or global addresses at every load and store.
761  InlineAsm *Asm =
762  InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
763  StringRef(""), StringRef("=r,0"),
764  /*hasSideEffects=*/false);
765  return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
766 }
767 
768 Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
769  return getOpaqueNoopCast(IRB, ShadowGlobal);
770 }
771 
772 Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
773  if (Mapping.Offset != kDynamicShadowSentinel)
774  return getOpaqueNoopCast(
775  IRB, ConstantExpr::getIntToPtr(
776  ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
777 
778  if (Mapping.InGlobal) {
779  return getDynamicShadowIfunc(IRB);
780  } else {
781  Value *GlobalDynamicAddress =
782  IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
783  kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
784  return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
785  }
786 }
787 
788 bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
789  // Do not instrument accesses from different address spaces; we cannot deal
790  // with them.
791  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
792  if (PtrTy->getPointerAddressSpace() != 0)
793  return true;
794 
795  // Ignore swifterror addresses.
796  // swifterror memory addresses are mem2reg promoted by instruction
797  // selection. As such they cannot have regular uses like an instrumentation
798  // function and it makes no sense to track them as memory.
799  if (Ptr->isSwiftError())
800  return true;
801 
802  if (findAllocaForValue(Ptr)) {
803  if (!InstrumentStack)
804  return true;
805  if (SSI && SSI->stackAccessIsSafe(*Inst))
806  return true;
807  }
808  return false;
809 }
810 
811 void HWAddressSanitizer::getInterestingMemoryOperands(
812  Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
813  // Skip memory accesses inserted by another instrumentation.
814  if (I->hasMetadata("nosanitize"))
815  return;
816 
817  // Do not instrument the load fetching the dynamic shadow address.
818  if (ShadowBase == I)
819  return;
820 
821  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
822  if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
823  return;
824  Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
825  LI->getType(), LI->getAlign());
826  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
827  if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
828  return;
829  Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
830  SI->getValueOperand()->getType(), SI->getAlign());
831  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
832  if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
833  return;
834  Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
835  RMW->getValOperand()->getType(), None);
836  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
837  if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
838  return;
839  Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
840  XCHG->getCompareOperand()->getType(), None);
841  } else if (auto CI = dyn_cast<CallInst>(I)) {
842  for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
843  if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
844  ignoreAccess(I, CI->getArgOperand(ArgNo)))
845  continue;
846  Type *Ty = CI->getParamByValType(ArgNo);
847  Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
848  }
849  }
850 }
851 
852 static unsigned getPointerOperandIndex(Instruction *I) {
853  if (LoadInst *LI = dyn_cast<LoadInst>(I))
854  return LI->getPointerOperandIndex();
855  if (StoreInst *SI = dyn_cast<StoreInst>(I))
856  return SI->getPointerOperandIndex();
857  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
858  return RMW->getPointerOperandIndex();
859  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
860  return XCHG->getPointerOperandIndex();
861  report_fatal_error("Unexpected instruction");
862  return -1;
863 }
864 
865 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
866  size_t Res = countTrailingZeros(TypeSize / 8);
867  assert(Res < kNumberOfAccessSizes);
868  return Res;
869 }
870 
871 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
872  if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64)
873  return;
874 
875  IRBuilder<> IRB(I);
876  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
877  Value *UntaggedPtr =
878  IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
879  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
880 }
881 
882 Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
883  // Mem >> Scale
884  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
885  if (Mapping.Offset == 0)
886  return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
887  // (Mem >> Scale) + Offset
888  return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
889 }
890 
891 int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
892  unsigned AccessSizeIndex) {
893  return (CompileKernel << HWASanAccessInfo::CompileKernelShift) +
894  (HasMatchAllTag << HWASanAccessInfo::HasMatchAllShift) +
895  (MatchAllTag << HWASanAccessInfo::MatchAllShift) +
896  (Recover << HWASanAccessInfo::RecoverShift) +
897  (IsWrite << HWASanAccessInfo::IsWriteShift) +
898  (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
899 }
900 
901 void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
902  unsigned AccessSizeIndex,
903  Instruction *InsertBefore) {
904  assert(!UsePageAliases);
905  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
906  IRBuilder<> IRB(InsertBefore);
907  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
908  Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
909  IRB.CreateCall(Intrinsic::getDeclaration(
910  M, UseShortGranules
911  ? Intrinsic::hwasan_check_memaccess_shortgranules
912  : Intrinsic::hwasan_check_memaccess),
913  {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
914 }
915 
916 void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
917  unsigned AccessSizeIndex,
918  Instruction *InsertBefore) {
919  assert(!UsePageAliases);
920  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
921  IRBuilder<> IRB(InsertBefore);
922 
923  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
924  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, PointerTagShift),
925  IRB.getInt8Ty());
926  Value *AddrLong = untagPointer(IRB, PtrLong);
927  Value *Shadow = memToShadow(AddrLong, IRB);
928  Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
929  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
930 
931  if (HasMatchAllTag) {
932  Value *TagNotIgnored = IRB.CreateICmpNE(
933  PtrTag, ConstantInt::get(PtrTag->getType(), MatchAllTag));
934  TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
935  }
936 
937  Instruction *CheckTerm =
938  SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
939  MDBuilder(*C).createBranchWeights(1, 100000));
940 
941  IRB.SetInsertPoint(CheckTerm);
942  Value *OutOfShortGranuleTagRange =
943  IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
944  Instruction *CheckFailTerm =
945  SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
946  MDBuilder(*C).createBranchWeights(1, 100000));
947 
948  IRB.SetInsertPoint(CheckTerm);
949  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
950  PtrLowBits = IRB.CreateAdd(
951  PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
952  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
953  SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
954  MDBuilder(*C).createBranchWeights(1, 100000),
955  (DomTreeUpdater *)nullptr, nullptr,
956  CheckFailTerm->getParent());
957 
958  IRB.SetInsertPoint(CheckTerm);
959  Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
960  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
961  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
962  Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
963  SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
964  MDBuilder(*C).createBranchWeights(1, 100000),
965  (DomTreeUpdater *)nullptr, nullptr,
966  CheckFailTerm->getParent());
967 
968  IRB.SetInsertPoint(CheckFailTerm);
969  InlineAsm *Asm;
970  switch (TargetTriple.getArch()) {
971  case Triple::x86_64:
972  // The signal handler will find the data address in rdi.
973  Asm = InlineAsm::get(
974  FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
975  "int3\nnopl " +
976  itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
977  "(%rax)",
978  "{rdi}",
979  /*hasSideEffects=*/true);
980  break;
981  case Triple::aarch64:
982  case Triple::aarch64_be:
983  // The signal handler will find the data address in x0.
984  Asm = InlineAsm::get(
985  FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
986  "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
987  "{x0}",
988  /*hasSideEffects=*/true);
989  break;
990  default:
991  report_fatal_error("unsupported architecture");
992  }
993  IRB.CreateCall(Asm, PtrLong);
994  if (Recover)
995  cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
996 }
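The control flow emitted above implements the following decision; here is a behavioral model on plain integers (illustration only, not the code the pass or runtime actually uses, and the optional match-all tag is omitted):

#include <cstdint>

// Decide whether an AccessSize-byte access starting at OffsetInGranule within
// one 16-byte granule is valid, given the pointer tag, the granule's shadow
// byte, and the granule's last byte (where short granules keep the real tag).
static bool checkGranule(uint8_t PtrTag, uint8_t ShadowByte,
                         unsigned OffsetInGranule, unsigned AccessSize,
                         uint8_t LastByteOfGranule) {
  if (PtrTag == ShadowByte)
    return true;                      // whole granule carries the pointer's tag
  if (ShadowByte > 15)
    return false;                     // genuinely different tag: report
  // Short granule: the shadow byte holds the number of addressable bytes.
  if (OffsetInGranule + AccessSize > ShadowByte)
    return false;                     // access runs past the valid prefix
  return PtrTag == LastByteOfGranule; // real tag lives in the granule's last byte
}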
997 
998 bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
999  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1000  return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
1001  (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
1002  }
1003  if (isa<MemSetInst>(MI))
1004  return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
1005  return false;
1006 }
1007 
1008 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1009  IRBuilder<> IRB(MI);
1010  if (isa<MemTransferInst>(MI)) {
1011  IRB.CreateCall(
1012  isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
1013  {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1014  IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
1015  IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1016  } else if (isa<MemSetInst>(MI)) {
1017  IRB.CreateCall(
1018  HWAsanMemset,
1019  {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1020  IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1021  IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1022  }
1023  MI->eraseFromParent();
1024 }
1025 
1026 bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
1027  Value *Addr = O.getPtr();
1028 
1029  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1030 
1031  if (O.MaybeMask)
1032  return false; // FIXME
1033 
1034  IRBuilder<> IRB(O.getInsn());
1035  if (isPowerOf2_64(O.TypeSize) &&
1036  (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1037  (!O.Alignment || *O.Alignment >= (1ULL << Mapping.Scale) ||
1038  *O.Alignment >= O.TypeSize / 8)) {
1039  size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
1040  if (InstrumentWithCalls) {
1041  IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1042  IRB.CreatePointerCast(Addr, IntptrTy));
1043  } else if (OutlinedChecks) {
1044  instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
1045  } else {
1046  instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
1047  }
1048  } else {
1049  IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
1050  {IRB.CreatePointerCast(Addr, IntptrTy),
1051  ConstantInt::get(IntptrTy, O.TypeSize / 8)});
1052  }
1053  untagPointerOperand(O.getInsn(), Addr);
1054 
1055  return true;
1056 }
1057 
1058 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
1059  uint64_t ArraySize = 1;
1060  if (AI.isArrayAllocation()) {
1061  const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
1062  assert(CI && "non-constant array size");
1063  ArraySize = CI->getZExtValue();
1064  }
1065  Type *Ty = AI.getAllocatedType();
1066  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
1067  return SizeInBytes * ArraySize;
1068 }
1069 
1070 void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1071  size_t Size) {
1072  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1073  if (!UseShortGranules)
1074  Size = AlignedSize;
1075 
1076  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
1077  if (InstrumentWithCalls) {
1078  IRB.CreateCall(HwasanTagMemoryFunc,
1079  {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
1080  ConstantInt::get(IntptrTy, AlignedSize)});
1081  } else {
1082  size_t ShadowSize = Size >> Mapping.Scale;
1083  Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
1084  // If this memset is not inlined, it will be intercepted in the hwasan
1085  // runtime library. That's OK, because the interceptor skips the checks if
1086  // the address is in the shadow region.
1087  // FIXME: the interceptor is not as fast as real memset. Consider lowering
1088  // llvm.memset right here into either a sequence of stores, or a call to
1089  // hwasan_tag_memory.
1090  if (ShadowSize)
1091  IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1));
1092  if (Size != AlignedSize) {
1093  IRB.CreateStore(
1094  ConstantInt::get(Int8Ty, Size % Mapping.getObjectAlignment()),
1095  IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1096  IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
1097  Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
1098  AlignedSize - 1));
1099  }
1100  }
1101 }
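For example, a 20-byte alloca tagged T covers one full granule plus a 4-byte short granule: its shadow becomes {T, 4} and the last byte of the (padded) object gets T. A minimal sketch of that layout, assuming 16-byte granules and an object already padded to a granule multiple:

#include <cstdint>
#include <cstring>

// Illustration only: write the shadow and the in-object short-granule tag the
// same way tagAlloca does.
static void tagObject(uint8_t *Shadow, uint8_t *Object, uint8_t Tag,
                      size_t Size, size_t Granule = 16) {
  std::memset(Shadow, Tag, Size / Granule);               // full granules
  if (size_t Rem = Size % Granule) {
    Shadow[Size / Granule] = static_cast<uint8_t>(Rem);   // short granule size
    Object[Size / Granule * Granule + Granule - 1] = Tag; // real tag, last byte
  }
}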
1102 
1103 unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1104  if (TargetTriple.getArch() == Triple::x86_64)
1105  return AllocaNo & TagMaskByte;
1106 
1107  // A list of 8-bit numbers that have at most one run of non-zero bits.
1108  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1109  // masks.
1110  // The list does not include the value 255, which is used for UAR.
1111  //
1112  // Because we are more likely to use earlier elements of this list than later
1113  // ones, it is sorted in increasing order of probability of collision with a
1114  // mask allocated (temporally) nearby. The program that generated this list
1115  // can be found at:
1116  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1117  static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240,
1118  48, 16, 120, 248, 56, 24, 8, 124, 252,
1119  60, 28, 12, 4, 126, 254, 62, 30, 14,
1120  6, 2, 127, 63, 31, 15, 7, 3, 1};
1121  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
1122 }
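The property the comment relies on can be stated in a couple of lines; a sketch (illustration only) of the "at most one run of set bits" predicate that makes x ^ (mask << 56) encodable as a single AArch64 EOR with a logical immediate:

// Returns true if M has at most one contiguous run of set bits.
static bool hasSingleRunOfOnes(unsigned M) {
  while (M && (M & 1) == 0)
    M >>= 1;                 // drop trailing zeros
  return (M & (M + 1)) == 0; // what remains must look like 0b0...01...1
}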
1123 
1124 Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1125  if (TargetTriple.getArch() == Triple::x86_64) {
1126  Constant *TagMask = ConstantInt::get(IntptrTy, TagMaskByte);
1127  Value *NewTag = IRB.CreateAnd(OldTag, TagMask);
1128  return NewTag;
1129  }
1130  // aarch64 uses 8-bit tags, so no mask is needed.
1131  return OldTag;
1132 }
1133 
1134 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1135  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1136 }
1137 
1138 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1139  if (ClGenerateTagsWithCalls)
1140  return getNextTagWithCall(IRB);
1141  if (StackBaseTag)
1142  return StackBaseTag;
1143  // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
1144  // first).
1145  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1146  auto GetStackPointerFn = Intrinsic::getDeclaration(
1147  M, Intrinsic::frameaddress,
1148  IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1149  Value *StackPointer = IRB.CreateCall(
1150  GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
1151 
1152  // Extract some entropy from the stack pointer for the tags.
1153  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1154  // between functions).
1155  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
1156  Value *StackTag =
1157  applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1158  IRB.CreateLShr(StackPointerLong, 20)));
1159  StackTag->setName("hwasan.stack.base.tag");
1160  return StackTag;
1161 }
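In scalar terms the base tag is roughly the following mix (a sketch; the result is then reduced to tag width by applyTagMask):

#include <cstdint>

// Illustration only: XOR the frame-dependent low bits of SP with the
// ASLR-randomized bits 20..27, as described in the comment above.
static uint64_t stackBaseTag(uint64_t StackPointer) {
  return StackPointer ^ (StackPointer >> 20);
}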
1162 
1163 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1164  AllocaInst *AI, unsigned AllocaNo) {
1165  if (!StackTag)
1166  return getNextTagWithCall(IRB);
1167  return IRB.CreateXor(StackTag,
1168  ConstantInt::get(IntptrTy, retagMask(AllocaNo)));
1169 }
1170 
1171 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
1172  if (ClUARRetagToZero)
1173  return ConstantInt::get(IntptrTy, 0);
1174  if (ClGenerateTagsWithCalls)
1175  return getNextTagWithCall(IRB);
1176  return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, TagMaskByte));
1177 }
1178 
1179 // Add a tag to an address.
1180 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1181  Value *PtrLong, Value *Tag) {
1182  assert(!UsePageAliases);
1183  Value *TaggedPtrLong;
1184  if (CompileKernel) {
1185  // Kernel addresses have 0xFF in the most significant byte.
1186  Value *ShiftedTag =
1187  IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1188  ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1189  TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1190  } else {
1191  // Userspace can simply do OR (tag << PointerTagShift);
1192  Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1193  TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1194  }
1195  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1196 }
1197 
1198 // Remove tag from an address.
1199 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1200  assert(!UsePageAliases);
1201  Value *UntaggedPtrLong;
1202  if (CompileKernel) {
1203  // Kernel addresses have 0xFF in the most significant byte.
1204  UntaggedPtrLong =
1205  IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1206  0xFFULL << PointerTagShift));
1207  } else {
1208  // Userspace addresses have 0x00.
1209  UntaggedPtrLong =
1210  IRB.CreateAnd(PtrLong, ConstantInt::get(PtrLong->getType(),
1211  ~(0xFFULL << PointerTagShift)));
1212  }
1213  return UntaggedPtrLong;
1214 }
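The same manipulation on plain integers, for reference (a sketch assuming the AArch64 layout with an 8-bit tag in bits 56..63; x86_64 with LAM uses a shift of 57 and 6-bit tags, as set up in initializeModule):

#include <cstdint>

constexpr unsigned kTagShift = 56;

static uint64_t tagUserPtr(uint64_t P, uint8_t Tag) {
  return P | (static_cast<uint64_t>(Tag) << kTagShift); // top byte was 0x00
}
static uint64_t untagUserPtr(uint64_t P) {
  return P & ~(0xFFULL << kTagShift);
}
static uint64_t tagKernelPtr(uint64_t P, uint8_t Tag) {
  // Kernel pointers start with 0xFF in the top byte, so AND the tag in.
  return P & ((static_cast<uint64_t>(Tag) << kTagShift) |
              ((1ULL << kTagShift) - 1));
}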
1215 
1216 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1217  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1218  if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1219  // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1220  // in Bionic's libc/private/bionic_tls.h.
1221  Function *ThreadPointerFunc =
1222  Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1223  Value *SlotPtr = IRB.CreatePointerCast(
1224  IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
1225  IRB.CreateCall(ThreadPointerFunc), 0x30),
1226  Ty->getPointerTo(0));
1227  return SlotPtr;
1228  }
1229  if (ThreadPtrGlobal)
1230  return ThreadPtrGlobal;
1231 
1232  return nullptr;
1233 }
1234 
1235 void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1236  if (!Mapping.InTls)
1237  ShadowBase = getShadowNonTls(IRB);
1238  else if (!WithFrameRecord && TargetTriple.isAndroid())
1239  ShadowBase = getDynamicShadowIfunc(IRB);
1240 
1241  if (!WithFrameRecord && ShadowBase)
1242  return;
1243 
1244  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
1245  assert(SlotPtr);
1246 
1247  Value *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1248  // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
1249  Value *ThreadLongMaybeUntagged =
1250  TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
1251 
1252  if (WithFrameRecord) {
1253  Function *F = IRB.GetInsertBlock()->getParent();
1254  StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1255 
1256  // Prepare ring buffer data.
1257  Value *PC;
1258  if (TargetTriple.getArch() == Triple::aarch64)
1259  PC = readRegister(IRB, "pc");
1260  else
1261  PC = IRB.CreatePtrToInt(F, IntptrTy);
1262  Module *M = F->getParent();
1263  auto GetStackPointerFn = Intrinsic::getDeclaration(
1264  M, Intrinsic::frameaddress,
1265  IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1266  Value *SP = IRB.CreatePtrToInt(
1267  IRB.CreateCall(GetStackPointerFn,
1268  {Constant::getNullValue(IRB.getInt32Ty())}),
1269  IntptrTy);
1270  // Mix SP and PC.
1271  // Assumptions:
1272  // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1273  // SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
1274  // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
1275  // 0xSSSSPPPPPPPPPPPP
1276  SP = IRB.CreateShl(SP, 44);
1277 
1278  // Store data to ring buffer.
1279  Value *RecordPtr =
1280  IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
1281  IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);
1282 
1283  // Update the ring buffer. Top byte of ThreadLong defines the size of the
1284  // buffer in pages, it must be a power of two, and the start of the buffer
1285  // must be aligned by twice that much. Therefore wrap around of the ring
1286  // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1287  // The use of AShr instead of LShr is due to
1288  // https://bugs.llvm.org/show_bug.cgi?id=39030
1289  // Runtime library makes sure not to use the highest bit.
1290  Value *WrapMask = IRB.CreateXor(
1291  IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1292  ConstantInt::get(IntptrTy, (uint64_t)-1));
1293  Value *ThreadLongNew = IRB.CreateAnd(
1294  IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1295  IRB.CreateStore(ThreadLongNew, SlotPtr);
1296  }
1297 
1298  if (!ShadowBase) {
1299  // Get shadow base address by aligning RecordPtr up.
1300  // Note: this is not correct if the pointer is already aligned.
1301  // Runtime library will make sure this never happens.
1302  ShadowBase = IRB.CreateAdd(
1303  IRB.CreateOr(
1304  ThreadLongMaybeUntagged,
1305  ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1306  ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1307  ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
1308  }
1309 }
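The two address computations above, written out on plain integers (a schematic sketch; kShadowBaseAlignment is 32 as defined near the top of this file, and ThreadLong is the untagged value where untagging is needed):

#include <cstdint>

// Advance the stack-history ring buffer: the top byte of ThreadLong encodes
// the buffer size in pages, and the buffer start is aligned to twice its size,
// so wrap-around is a single AND.
static uint64_t advanceRecordPtr(uint64_t ThreadLong) {
  uint64_t WrapMask = ~((ThreadLong >> 56) << 12);
  return (ThreadLong + 8) & WrapMask;
}

// Derive the shadow base by rounding ThreadLong up to the next multiple of
// 1 << kShadowBaseAlignment (the runtime guarantees it is not already aligned).
static uint64_t shadowBaseFromThreadLong(uint64_t ThreadLong) {
  const unsigned kShadowBaseAlignment = 32;
  return (ThreadLong | ((1ULL << kShadowBaseAlignment) - 1)) + 1;
}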
1310 
1311 Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1312  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1313  Function *ReadRegister =
1314  Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1315  MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1316  Value *Args[] = {MetadataAsValue::get(*C, MD)};
1317  return IRB.CreateCall(ReadRegister, Args);
1318 }
1319 
1320 bool HWAddressSanitizer::instrumentLandingPads(
1321  SmallVectorImpl<Instruction *> &LandingPadVec) {
1322  for (auto *LP : LandingPadVec) {
1323  IRBuilder<> IRB(LP->getNextNode());
1324  IRB.CreateCall(
1325  HWAsanHandleVfork,
1326  {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1327  : "sp")});
1328  }
1329  return true;
1330 }
1331 
1332 static bool
1333 maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
1334  const DominatorTree &DT) {
1335  // If we have too many lifetime ends, give up, as the algorithm below is N^2.
1336  if (Insts.size() > ClMaxLifetimes)
1337  return true;
1338  for (size_t I = 0; I < Insts.size(); ++I) {
1339  for (size_t J = 0; J < Insts.size(); ++J) {
1340  if (I == J)
1341  continue;
1342  if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, &DT))
1343  return true;
1344  }
1345  }
1346  return false;
1347 }
1348 
1349 // static
1350 bool HWAddressSanitizer::isStandardLifetime(const AllocaInfo &AllocaInfo,
1351  const DominatorTree &DT) {
1352  // An alloca that has exactly one start and end in every possible execution.
1353  // If it has multiple ends, they have to be unreachable from each other, so
1354  // at most one of them is actually used for each execution of the function.
1355  return AllocaInfo.LifetimeStart.size() == 1 &&
1356  (AllocaInfo.LifetimeEnd.size() == 1 ||
1357  (AllocaInfo.LifetimeEnd.size() > 0 &&
1358  !maybeReachableFromEachOther(AllocaInfo.LifetimeEnd, DT)));
1359 }
1360 
1361 bool HWAddressSanitizer::instrumentStack(
1362  MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument,
1363  SmallVector<Instruction *, 4> &UnrecognizedLifetimes,
1364  DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
1365  SmallVectorImpl<Instruction *> &RetVec, Value *StackTag,
1366  llvm::function_ref<const DominatorTree &()> GetDT,
1367  llvm::function_ref<const PostDominatorTree &()> GetPDT) {
1368  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1369  // alloca addresses using that. Unfortunately, offsets are not known yet
1370  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1371  // temp, shift-OR it into each alloca address and xor with the retag mask.
1372  // This generates one extra instruction per alloca use.
1373  unsigned int I = 0;
1374 
1375  for (auto &KV : AllocasToInstrument) {
1376  auto N = I++;
1377  auto *AI = KV.first;
1378  AllocaInfo &Info = KV.second;
1379  IRBuilder<> IRB(AI->getNextNode());
1380 
1381  // Replace uses of the alloca with tagged address.
1382  Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
1383  Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1384  Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
1385  std::string Name =
1386  AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1387  Replacement->setName(Name + ".hwasan");
1388 
1389  AI->replaceUsesWithIf(Replacement,
1390  [AILong](Use &U) { return U.getUser() != AILong; });
1391 
1392  for (auto *DDI : AllocaDbgMap.lookup(AI)) {
1393  // Prepend "tag_offset, N" to the dwarf expression.
1394  // Tag offset logically applies to the alloca pointer, and it makes sense
1395  // to put it at the beginning of the expression.
1396  SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
1397  retagMask(N)};
1398  for (size_t LocNo = 0; LocNo < DDI->getNumVariableLocationOps(); ++LocNo)
1399  if (DDI->getVariableLocationOp(LocNo) == AI)
1400  DDI->setExpression(DIExpression::appendOpsToArg(DDI->getExpression(),
1401  NewOps, LocNo));
1402  }
1403 
1404  size_t Size = getAllocaSizeInBytes(*AI);
1405  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1406  bool StandardLifetime =
1407  UnrecognizedLifetimes.empty() && isStandardLifetime(Info, GetDT());
1408  if (DetectUseAfterScope && StandardLifetime) {
1409  IntrinsicInst *Start = Info.LifetimeStart[0];
1410  IRB.SetInsertPoint(Start->getNextNode());
1411  auto TagEnd = [&](Instruction *Node) {
1412  IRB.SetInsertPoint(Node);
1413  Value *UARTag = getUARTag(IRB, StackTag);
1414  tagAlloca(IRB, AI, UARTag, AlignedSize);
1415  };
1416  tagAlloca(IRB, AI, Tag, Size);
1417  if (!forAllReachableExits(GetDT(), GetPDT(), Start, Info.LifetimeEnd,
1418  RetVec, TagEnd)) {
1419  for (auto *End : Info.LifetimeEnd)
1420  End->eraseFromParent();
1421  }
1422  } else {
1423  tagAlloca(IRB, AI, Tag, Size);
1424  for (auto *RI : RetVec) {
1425  IRB.SetInsertPoint(RI);
1426  Value *UARTag = getUARTag(IRB, StackTag);
1427  tagAlloca(IRB, AI, UARTag, AlignedSize);
1428  }
1429  if (!StandardLifetime) {
1430  for (auto &II : Info.LifetimeStart)
1431  II->eraseFromParent();
1432  for (auto &II : Info.LifetimeEnd)
1433  II->eraseFromParent();
1434  }
1435  }
1436  }
1437  for (auto &I : UnrecognizedLifetimes)
1438  I->eraseFromParent();
1439  return true;
1440 }
1441 
1442 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1443  return (AI.getAllocatedType()->isSized() &&
1444  // FIXME: instrument dynamic allocas, too
1445  AI.isStaticAlloca() &&
1446  // alloca() may be called with 0 size, ignore it.
1447  getAllocaSizeInBytes(AI) > 0 &&
1448  // We are only interested in allocas not promotable to registers.
1449  // Promotable allocas are common under -O0.
1450  !isAllocaPromotable(&AI) &&
1451  // inalloca allocas are not treated as static, and we don't want
1452  // dynamic alloca instrumentation for them as well.
1453  !AI.isUsedWithInAlloca() &&
1454  // swifterror allocas are register promoted by ISel
1455  !AI.isSwiftError()) &&
1456  // safe allocas are not interesting
1457  !(SSI && SSI->isSafe(AI));
1458 }
1459 
1460 DenseMap<AllocaInst *, AllocaInst *> HWAddressSanitizer::padInterestingAllocas(
1461  const MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument) {
1462  DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
1463  for (auto &KV : AllocasToInstrument) {
1464  AllocaInst *AI = KV.first;
1465  uint64_t Size = getAllocaSizeInBytes(*AI);
1466  uint64_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1467  AI->setAlignment(
1468  Align(std::max(AI->getAlignment(), Mapping.getObjectAlignment())));
1469  if (Size != AlignedSize) {
1470  Type *AllocatedType = AI->getAllocatedType();
1471  if (AI->isArrayAllocation()) {
1472  uint64_t ArraySize =
1473  cast<ConstantInt>(AI->getArraySize())->getZExtValue();
1474  AllocatedType = ArrayType::get(AllocatedType, ArraySize);
1475  }
1476  Type *TypeWithPadding = StructType::get(
1477  AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
1478  auto *NewAI = new AllocaInst(
1479  TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
1480  NewAI->takeName(AI);
1481  NewAI->setAlignment(AI->getAlign());
1482  NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
1483  NewAI->setSwiftError(AI->isSwiftError());
1484  NewAI->copyMetadata(*AI);
1485  auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
1486  AI->replaceAllUsesWith(Bitcast);
1487  AllocaToPaddedAllocaMap[AI] = NewAI;
1488  }
1489  }
1490  return AllocaToPaddedAllocaMap;
1491 }
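For instance, a 20-byte alloca becomes a { <20-byte type>, [12 x i8] } pair with 16-byte alignment, i.e. 32 bytes in total. The rounding itself is just alignTo (illustration only):

#include <cstdint>

// Equivalent to alignTo(Size, Mapping.getObjectAlignment()) with the default
// 16-byte granule.
static uint64_t paddedAllocaSize(uint64_t Size, uint64_t Granule = 16) {
  return (Size + Granule - 1) & ~(Granule - 1);
}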
1492 
1493 bool HWAddressSanitizer::sanitizeFunction(
1494  Function &F, llvm::function_ref<const DominatorTree &()> GetDT,
1495  llvm::function_ref<const PostDominatorTree &()> GetPDT) {
1496  if (&F == HwasanCtorFunction)
1497  return false;
1498 
1499  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1500  return false;
1501 
1502  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1503 
1504  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1505  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1506  MapVector<AllocaInst *, AllocaInfo> AllocasToInstrument;
1507  SmallVector<Instruction *, 8> RetVec;
1508  SmallVector<Instruction *, 8> LandingPadVec;
1509  SmallVector<Instruction *, 4> UnrecognizedLifetimes;
1510  DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> AllocaDbgMap;
1511  for (auto &BB : F) {
1512  for (auto &Inst : BB) {
1513  if (InstrumentStack) {
1514  if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
1515  if (isInterestingAlloca(*AI))
1516  AllocasToInstrument.insert({AI, {}});
1517  continue;
1518  }
1519  auto *II = dyn_cast<IntrinsicInst>(&Inst);
1520  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
1521  II->getIntrinsicID() == Intrinsic::lifetime_end)) {
1522  AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
1523  if (!AI) {
1524  UnrecognizedLifetimes.push_back(&Inst);
1525  continue;
1526  }
1527  if (!isInterestingAlloca(*AI))
1528  continue;
1529  if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1530  AllocasToInstrument[AI].LifetimeStart.push_back(II);
1531  else
1532  AllocasToInstrument[AI].LifetimeEnd.push_back(II);
1533  continue;
1534  }
1535  }
1536 
1537  if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
1538  isa<CleanupReturnInst>(Inst))
1539  RetVec.push_back(&Inst);
1540 
1541  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
1542  for (Value *V : DVI->location_ops()) {
1543  if (auto *Alloca = dyn_cast_or_null<AllocaInst>(V))
1544  if (!AllocaDbgMap.count(Alloca) ||
1545  AllocaDbgMap[Alloca].back() != DVI)
1546  AllocaDbgMap[Alloca].push_back(DVI);
1547  }
1548  }
1549 
1550  if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1551  LandingPadVec.push_back(&Inst);
1552 
1553  getInterestingMemoryOperands(&Inst, OperandsToInstrument);
1554 
1555  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1556  if (!ignoreMemIntrinsic(MI))
1557  IntrinToInstrument.push_back(MI);
1558  }
1559  }
1560 
1561  initializeCallbacks(*F.getParent());
1562 
1563  bool Changed = false;
1564 
1565  if (!LandingPadVec.empty())
1566  Changed |= instrumentLandingPads(LandingPadVec);
1567 
1568  if (AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1569  F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1570  // __hwasan_personality_thunk is a no-op for functions without an
1571  // instrumented stack, so we can drop it.
1572  F.setPersonalityFn(nullptr);
1573  Changed = true;
1574  }
1575 
1576  if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1577  IntrinToInstrument.empty())
1578  return Changed;
1579 
1580  assert(!ShadowBase);
1581 
1582  Instruction *InsertPt = &*F.getEntryBlock().begin();
1583  IRBuilder<> EntryIRB(InsertPt);
1584  emitPrologue(EntryIRB,
1585  /*WithFrameRecord*/ ClRecordStackHistory &&
1586  Mapping.WithFrameRecord && !AllocasToInstrument.empty());
1587 
1588  if (!AllocasToInstrument.empty()) {
1589  Value *StackTag =
1590  ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
1591  instrumentStack(AllocasToInstrument, UnrecognizedLifetimes, AllocaDbgMap,
1592  RetVec, StackTag, GetDT, GetPDT);
1593  }
1594  // Pad and align each of the allocas that we instrumented to stop small
 1595  // uninteresting allocas from hiding in an instrumented alloca's padding and
 1596  // so that we have enough space to store real tags for short granules.
1597  DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap =
1598  padInterestingAllocas(AllocasToInstrument);
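// Example (illustrative, assuming the default shadow scale of 4, i.e. 16-byte
// granules): an `alloca [20 x i8]` is rewritten to roughly
//   %padded = alloca { [20 x i8], [12 x i8] }, align 16
// so the second granule is owned entirely by this allocation and its last
// byte can hold the real tag for the short (4 valid bytes) granule.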
1599 
1600  if (!AllocaToPaddedAllocaMap.empty()) {
1601  for (auto &BB : F) {
1602  for (auto &Inst : BB) {
1603  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
1604  SmallDenseSet<Value *> LocationOps(DVI->location_ops().begin(),
1605  DVI->location_ops().end());
1606  for (Value *V : LocationOps) {
1607  if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
1608  if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
1609  DVI->replaceVariableLocationOp(V, NewAI);
1610  }
1611  }
1612  }
1613  }
1614  }
1615  for (auto &P : AllocaToPaddedAllocaMap)
1616  P.first->eraseFromParent();
1617  }
1618 
1619  // If we split the entry block, move any allocas that were originally in the
1620  // entry block back into the entry block so that they aren't treated as
1621  // dynamic allocas.
1622  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1623  InsertPt = &*F.getEntryBlock().begin();
1624  for (Instruction &I :
1625  llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1626  if (auto *AI = dyn_cast<AllocaInst>(&I))
1627  if (isa<ConstantInt>(AI->getArraySize()))
1628  I.moveBefore(InsertPt);
1629  }
1630  }
1631 
1632  for (auto &Operand : OperandsToInstrument)
1633  instrumentMemAccess(Operand);
1634 
1635  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1636  for (auto Inst : IntrinToInstrument)
1637  instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
1638  }
1639 
1640  ShadowBase = nullptr;
1641  StackBaseTag = nullptr;
1642 
1643  return true;
1644 }
1645 
1646 void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1647  assert(!UsePageAliases);
1648  Constant *Initializer = GV->getInitializer();
1649  uint64_t SizeInBytes =
1650  M.getDataLayout().getTypeAllocSize(Initializer->getType());
1651  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1652  if (SizeInBytes != NewSize) {
1653  // Pad the initializer out to the next multiple of 16 bytes and add the
1654  // required short granule tag.
1655  std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1656  Init.back() = Tag;
1657  Constant *Padding = ConstantDataArray::get(*C, Init);
1658  Initializer = ConstantStruct::getAnon({Initializer, Padding});
1659  }
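// Example (illustrative): a 13-byte global padded to 16 bytes gets three
// bytes appended, the first two zero and the last one equal to Tag, so the
// final byte of the short granule carries the real tag at run time.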
1660 
1661  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1662  GlobalValue::ExternalLinkage, Initializer,
1663  GV->getName() + ".hwasan");
1664  NewGV->copyAttributesFrom(GV);
1665  NewGV->setLinkage(GlobalValue::PrivateLinkage);
1666  NewGV->copyMetadata(GV, 0);
1667  NewGV->setAlignment(
1668  MaybeAlign(std::max(GV->getAlignment(), Mapping.getObjectAlignment())));
1669 
1670  // It is invalid to ICF two globals that have different tags. In the case
 1671  // where the size of the global is a multiple of the tag granularity, the
1672  // contents of the globals may be the same but the tags (i.e. symbol values)
1673  // may be different, and the symbols are not considered during ICF. In the
1674  // case where the size is not a multiple of the granularity, the short granule
1675  // tags would discriminate two globals with different tags, but there would
1676  // otherwise be nothing stopping such a global from being incorrectly ICF'd
1677  // with an uninstrumented (i.e. tag 0) global that happened to have the short
1678  // granule tag in the last byte.
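// Example (illustrative): two internal 16-byte globals with identical contents
// but different tags must stay distinct; marking the address as significant
// (UnnamedAddr::None) tells optimizers and linkers not to fold them.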
1679  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1680 
1681  // Descriptor format (assuming little-endian):
1682  // bytes 0-3: relative address of global
1683  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1684  // it isn't, we create multiple descriptors)
1685  // byte 7: tag
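// Example (illustrative): a 20-byte global tagged 0xa4 needs one descriptor;
// its first word is the 32-bit offset of the global relative to the
// descriptor and its second word is 20 | (0xa4 << 24) == 0xa4000014.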
1686  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1687  const uint64_t MaxDescriptorSize = 0xfffff0;
1688  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1689  DescriptorPos += MaxDescriptorSize) {
1690  auto *Descriptor =
1691  new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1692  nullptr, GV->getName() + ".hwasan.descriptor");
 1693  auto *GVRelPtr = ConstantExpr::getTrunc(
 1694  ConstantExpr::getAdd(
 1695  ConstantExpr::getSub(
 1696  ConstantExpr::getPtrToInt(NewGV, Int64Ty),
 1697  ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
 1698  ConstantInt::get(Int64Ty, DescriptorPos)),
 1699  Int32Ty);
1700  uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1701  auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1702  Descriptor->setComdat(NewGV->getComdat());
1703  Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1704  Descriptor->setSection("hwasan_globals");
1705  Descriptor->setMetadata(LLVMContext::MD_associated,
1706  MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1707  appendToCompilerUsed(M, Descriptor);
1708  }
1709 
 1710  Constant *Aliasee = ConstantExpr::getIntToPtr(
 1711  ConstantExpr::getAdd(
 1712  ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1713  ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1714  GV->getType());
1715  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1716  GV->getLinkage(), "", Aliasee, &M);
1717  Alias->setVisibility(GV->getVisibility());
1718  Alias->takeName(GV);
1719  GV->replaceAllUsesWith(Alias);
1720  GV->eraseFromParent();
1721 }
1722 
 1723 static DenseSet<GlobalVariable *> getExcludedGlobals(Module &M) {
 1724  NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
1725  if (!Globals)
1726  return DenseSet<GlobalVariable *>();
1727  DenseSet<GlobalVariable *> Excluded(Globals->getNumOperands());
1728  for (auto MDN : Globals->operands()) {
1729  // Metadata node contains the global and the fields of "Entry".
1730  assert(MDN->getNumOperands() == 5);
1731  auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
1732  // The optimizer may optimize away a global entirely.
1733  if (!V)
1734  continue;
1735  auto *StrippedV = V->stripPointerCasts();
1736  auto *GV = dyn_cast<GlobalVariable>(StrippedV);
1737  if (!GV)
1738  continue;
1739  ConstantInt *IsExcluded = mdconst::extract<ConstantInt>(MDN->getOperand(4));
1740  if (IsExcluded->isOne())
1741  Excluded.insert(GV);
1742  }
1743  return Excluded;
1744 }
1745 
1746 void HWAddressSanitizer::instrumentGlobals() {
1747  std::vector<GlobalVariable *> Globals;
1748  auto ExcludedGlobals = getExcludedGlobals(M);
1749  for (GlobalVariable &GV : M.globals()) {
1750  if (ExcludedGlobals.count(&GV))
1751  continue;
1752 
1753  if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
1754  GV.isThreadLocal())
1755  continue;
1756 
1757  // Common symbols can't have aliases point to them, so they can't be tagged.
1758  if (GV.hasCommonLinkage())
1759  continue;
1760 
1761  // Globals with custom sections may be used in __start_/__stop_ enumeration,
1762  // which would be broken both by adding tags and potentially by the extra
1763  // padding/alignment that we insert.
1764  if (GV.hasSection())
1765  continue;
1766 
1767  Globals.push_back(&GV);
1768  }
1769 
1770  MD5 Hasher;
1771  Hasher.update(M.getSourceFileName());
1772  MD5::MD5Result Hash;
1773  Hasher.final(Hash);
1774  uint8_t Tag = Hash[0] & TagMaskByte;
1775 
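// Example (illustrative): if the hash of the source file name yields a
// starting tag of 0x2d, the globals receive tags 0x2d, 0x2e, 0x2f, ... in
// order, with tag 0 skipped so a tagged global never matches untagged memory.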
1776  for (GlobalVariable *GV : Globals) {
1777  // Skip tag 0 in order to avoid collisions with untagged memory.
1778  if (Tag == 0)
1779  Tag = 1;
1780  instrumentGlobal(GV, Tag++);
1781  }
1782 }
1783 
1784 void HWAddressSanitizer::instrumentPersonalityFunctions() {
1785  // We need to untag stack frames as we unwind past them. That is the job of
1786  // the personality function wrapper, which either wraps an existing
1787  // personality function or acts as a personality function on its own. Each
1788  // function that has a personality function or that can be unwound past has
1789  // its personality function changed to a thunk that calls the personality
1790  // function wrapper in the runtime.
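// Example (illustrative): a function whose personality is __gxx_personality_v0
// instead gets a thunk named "__hwasan_personality_thunk.__gxx_personality_v0"
// that tail-calls __hwasan_personality_wrapper, passing the original
// personality plus the addresses of _Unwind_GetGR and _Unwind_GetCFA.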
 1791  MapVector<Constant *, std::vector<Function *>> PersonalityFns;
 1792  for (Function &F : M) {
1793  if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1794  continue;
1795 
1796  if (F.hasPersonalityFn()) {
1797  PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1798  } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1799  PersonalityFns[nullptr].push_back(&F);
1800  }
1801  }
1802 
1803  if (PersonalityFns.empty())
1804  return;
1805 
1806  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1807  "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
1808  Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
1809  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1810  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1811 
1812  for (auto &P : PersonalityFns) {
1813  std::string ThunkName = kHwasanPersonalityThunkName;
1814  if (P.first)
1815  ThunkName += ("." + P.first->getName()).str();
1816  FunctionType *ThunkFnTy = FunctionType::get(
1817  Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
1818  bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1819  cast<GlobalValue>(P.first)->hasLocalLinkage());
1820  auto *ThunkFn = Function::Create(ThunkFnTy,
 1821  IsLocal ? GlobalValue::InternalLinkage
 1822  : GlobalValue::LinkOnceODRLinkage,
 1823  ThunkName, &M);
1824  if (!IsLocal) {
1825  ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1826  ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1827  }
1828 
1829  auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1830  IRBuilder<> IRB(BB);
1831  CallInst *WrapperCall = IRB.CreateCall(
1832  HwasanPersonalityWrapper,
1833  {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1834  ThunkFn->getArg(3), ThunkFn->getArg(4),
1835  P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
1836  : Constant::getNullValue(Int8PtrTy),
1837  IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
1838  IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
1839  WrapperCall->setTailCall();
1840  IRB.CreateRet(WrapperCall);
1841 
1842  for (Function *F : P.second)
1843  F->setPersonalityFn(ThunkFn);
1844  }
1845 }
1846 
 1847 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
 1848  bool InstrumentWithCalls) {
1849  Scale = kDefaultShadowScale;
1850  if (TargetTriple.isOSFuchsia()) {
1851  // Fuchsia is always PIE, which means that the beginning of the address
1852  // space is always available.
1853  InGlobal = false;
1854  InTls = false;
1855  Offset = 0;
1856  WithFrameRecord = true;
1857  } else if (ClMappingOffset.getNumOccurrences() > 0) {
1858  InGlobal = false;
1859  InTls = false;
 1860  Offset = ClMappingOffset;
 1861  WithFrameRecord = false;
1862  } else if (ClEnableKhwasan || InstrumentWithCalls) {
1863  InGlobal = false;
1864  InTls = false;
1865  Offset = 0;
1866  WithFrameRecord = false;
1867  } else if (ClWithIfunc) {
1868  InGlobal = true;
1869  InTls = false;
 1870  Offset = kDynamicShadowSentinel;
 1871  WithFrameRecord = false;
1872  } else if (ClWithTls) {
1873  InGlobal = false;
1874  InTls = true;
 1875  Offset = kDynamicShadowSentinel;
 1876  WithFrameRecord = true;
1877  } else {
1878  InGlobal = false;
1879  InTls = false;
 1880  Offset = kDynamicShadowSentinel;
 1881  WithFrameRecord = false;
1882  }
1883 }
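// Example for ShadowMapping::init (illustrative): with the default flags on a
// non-Fuchsia target such as Linux or Android, the ClWithTls branch is taken,
// so the shadow base is read from a thread-local slot at run time
// (Offset == kDynamicShadowSentinel) and stack frame recording stays enabled.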
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
kDynamicShadowSentinel
static const uint64_t kDynamicShadowSentinel
Definition: HWAddressSanitizer.cpp:76
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:148
Instrumentation.h
Int32Ty
IntegerType * Int32Ty
Definition: NVVMIntrRange.cpp:67
ClInlineAllChecks
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
StackSafetyAnalysis.h
llvm::IRBuilderBase::CreateIntCast
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2186
llvm::GlobalVariable::eraseFromParent
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:385
llvm::StringRef::startswith
LLVM_NODISCARD bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:286
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:263
llvm::IRBuilderBase::getInt32Ty
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:518
llvm::IRBuilderBase::CreateStore
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1699
hwasan
hwasan
Definition: HWAddressSanitizer.cpp:477
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
llvm::IRBuilderBase::SetInsertPoint
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:184
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
llvm::NamedMDNode
A tuple of MDNodes.
Definition: Metadata.h:1421
llvm::CallInst::setTailCall
void setTailCall(bool IsTc=true)
Definition: Instructions.h:1682
llvm::MD5::update
void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition: MD5.cpp:190
llvm::StructType::get
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:408
llvm::GlobalValue::hasCommonLinkage
bool hasCommonLinkage() const
Definition: GlobalValue.h:455
llvm::StackSafetyGlobalInfo::isSafe
bool isSafe(const AllocaInst &AI) const
Definition: StackSafetyAnalysis.cpp:901
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1379
llvm::NamedMDNode::getNumOperands
unsigned getNumOperands() const
Definition: Metadata.cpp:1120
llvm::AllocaInst::getAlign
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:120
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
IntrinsicInst.h
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:783
DebugInfoMetadata.h
llvm::PassInfoMixin< HWAddressSanitizerPass >
llvm::GlobalValue::getLinkage
LinkageTypes getLinkage() const
Definition: GlobalValue.h:467
llvm::GlobalValue::HiddenVisibility
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:64
llvm::MemTransferInst
This class wraps the llvm.memcpy/memmove intrinsics.
Definition: IntrinsicInst.h:917
llvm::HWAddressSanitizerOptions::DisableOptimization
bool DisableOptimization
Definition: HWAddressSanitizer.h:30
llvm::Function
Definition: Function.h:62
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::lookup
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:197
llvm::IRBuilderBase::CreatePtrToInt
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2107
StringRef.h
Pass.h
llvm::HWASanAccessInfo::IsWriteShift
@ IsWriteShift
Definition: HWAddressSanitizer.h:61
llvm::BitCastInst
This class represents a no-op cast from one type to another.
Definition: Instructions.h:5198
llvm::IRBuilderBase::CreateXor
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1414
llvm::Type::getScalarType
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:308
kHwasanPersonalityThunkName
const char kHwasanPersonalityThunkName[]
Definition: HWAddressSanitizer.cpp:67
llvm::AllocaInst::getType
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:104
llvm::ilist_node_with_parent::getNextNode
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:288
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::createHWAddressSanitizerLegacyPassPass
FunctionPass * createHWAddressSanitizerLegacyPassPass(bool CompileKernel=false, bool Recover=false, bool DisableOptimization=false)
Definition: HWAddressSanitizer.cpp:482
InlineAsm.h
llvm::LegacyLegalizeActions::Bitcast
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Definition: LegacyLegalizerInfo.h:54
llvm::Value::hasName
bool hasName() const
Definition: Value.h:262
llvm::GlobalValue::UnnamedAddr::None
@ None
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:734
llvm::SmallDenseSet
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:286
llvm::IRBuilder<>
MapVector.h
llvm::GlobalVariable
Definition: GlobalVariable.h:40
llvm::PointerType::getAddressSpace
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:687
llvm::FunctionType::get
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
Definition: Type.cpp:363
ValueTracking.h
llvm::IRBuilderBase::CreateOr
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1388
kShadowBaseAlignment
static const unsigned kShadowBaseAlignment
Definition: HWAddressSanitizer.cpp:79
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:45
llvm::InterestingMemoryOperand
Definition: AddressSanitizerCommon.h:25
FAM
FunctionAnalysisManager FAM
Definition: PassBuilderBindings.cpp:59
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::dwarf::DW_OP_LLVM_tag_offset
@ DW_OP_LLVM_tag_offset
Only used in LLVM metadata.
Definition: Dwarf.h:144
llvm::PreservedAnalyses::none
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:158
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Module.h
llvm::Triple::x86_64
@ x86_64
Definition: Triple.h:84
llvm::forAllReachableExits
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const Instruction *Start, const SmallVectorImpl< IntrinsicInst * > &Ends, const SmallVectorImpl< Instruction * > &RetVec, F Callback)
Definition: AddressSanitizerCommon.h:56
llvm::initializeHWAddressSanitizerLegacyPassPass
void initializeHWAddressSanitizerLegacyPassPass(PassRegistry &)
llvm::HWASanAccessInfo::RecoverShift
@ RecoverShift
Definition: HWAddressSanitizer.h:62
llvm::MemIntrinsic
This is the common base class for memset/memcpy/memmove.
Definition: IntrinsicInst.h:874
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::count
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:145
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::MapVector
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:37
llvm::HWASanAccessInfo::HasMatchAllShift
@ HasMatchAllShift
Definition: HWAddressSanitizer.h:64
llvm::ValueAsMetadata::get
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:368
llvm::dwarf::Tag
Tag
Definition: Dwarf.h:104
llvm::IRBuilderBase::CreateAShr
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1342
llvm::detail::DenseSetImpl< ValueT, DenseMap< ValueT, detail::DenseSetEmpty, DenseMapInfo< ValueT >, detail::DenseSetPair< ValueT > >, DenseMapInfo< ValueT > >::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
PassRegistry.h
llvm::cl::ReallyHidden
@ ReallyHidden
Definition: CommandLine.h:144
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:241
llvm::IRBuilderBase::CreateIntToPtr
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2112
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
maybeReachableFromEachOther
static bool maybeReachableFromEachOther(const SmallVectorImpl< IntrinsicInst * > &Insts, const DominatorTree &DT)
Definition: HWAddressSanitizer.cpp:1333
llvm::HWASanAccessInfo::MatchAllShift
@ MatchAllShift
Definition: HWAddressSanitizer.h:63
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1233
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::Value::isSwiftError
bool isSwiftError() const
Return true if this value is a swifterror value.
Definition: Value.cpp:1011
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ConstantExpr::getSub
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2701
ClInstrumentWithCalls
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
Instruction.h
CommandLine.h
llvm::getOrCreateSanitizerCtorAndInitFunctions
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef())
Creates sanitizer constructor function lazily.
Definition: ModuleUtils.cpp:158
kNumberOfAccessSizes
static const size_t kNumberOfAccessSizes
Definition: HWAddressSanitizer.cpp:73
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::AllocaInst::isStaticAlloca
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Definition: Instructions.cpp:1397
llvm::DIExpression::appendOpsToArg
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
Definition: DebugInfoMetadata.cpp:1312
llvm::Triple::isAndroid
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:676
llvm::HWAddressSanitizerOptions::CompileKernel
bool CompileKernel
Definition: HWAddressSanitizer.h:27
llvm::Triple::isOSBinFormatELF
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:635
llvm::MD5::final
void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition: MD5.cpp:235
ELF.h
ClInstrumentAtomics
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
Constants.h
PostDominators.h
ClUsePageAliases
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
HWAddressSanitizer.h
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:113
llvm::IRBuilderBase::CreateMemSet
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:580
Intrinsics.h
llvm::ConstantExpr::getIntToPtr
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2219
getAllocaSizeInBytes
static uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Definition: HWAddressSanitizer.cpp:1058
llvm::IRBuilderBase::CreateLoad
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1660
llvm::Triple::isAArch64
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:727
MAM
ModuleAnalysisManager MAM
Definition: PassBuilderBindings.cpp:61
llvm::MDBuilder::createBranchWeights
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
llvm::PostDominatorTreeWrapperPass
Definition: PostDominators.h:73
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::ConstantExpr::getPtrToInt
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2205
ClUseShortGranules
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false), cl::ZeroOrMore)
ClWithIfunc
static cl::opt< bool > ClWithIfunc("hwasan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(false))
AddressSanitizerCommon.h
false
Definition: StackSlotColoring.cpp:142
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::isPotentiallyReachable
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:236
llvm::Instruction
Definition: Instruction.h:45
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:287
MDBuilder.h
llvm::AllocaInst::getArraySize
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:100
kDefaultShadowScale
static const size_t kDefaultShadowScale
Definition: HWAddressSanitizer.cpp:75
llvm::IRBuilderBase::getInt8Ty
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:508
llvm::appendToCompilerUsed
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
Definition: ModuleUtils.cpp:110
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
llvm::Value::setName
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:376
LoopDeletionResult::Modified
@ Modified
llvm::Module::getOrInsertGlobal
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition: Module.cpp:208
llvm::cl::Option::getNumOccurrences
int getNumOccurrences() const
Definition: CommandLine.h:402
llvm::DomTreeUpdater
Definition: DomTreeUpdater.h:28
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:925
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:73
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(HWAddressSanitizerLegacyPass, "hwasan", "HWAddressSanitizer: detect memory bugs using tagged addressing.", false, false) INITIALIZE_PASS_END(HWAddressSanitizerLegacyPass
ClInstrumentPersonalityFunctions
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden, cl::init(false), cl::ZeroOrMore)
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::GlobalValue::InternalLinkage
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:55
llvm::Comdat
Definition: Comdat.h:31
llvm::Triple::getArch
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:310
llvm::StringRef::str
LLVM_NODISCARD std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:245
llvm::None
const NoneType None
Definition: None.h:23
llvm::InlineAsm::get
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:42
llvm::IRBuilderBase::CreateRet
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:970
Type.h
llvm::IRBuilderBase::CreateAnd
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1362
llvm::IRBuilderBase::CreatePointerCast
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2163
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:148
llvm::Triple::isOSFuchsia
bool isOSFuchsia() const
Definition: Triple.h:517
llvm::IRBuilderBase::CreateZExt
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2025
llvm::cl::ZeroOrMore
@ ZeroOrMore
Definition: CommandLine.h:120
llvm::function_ref
An efficient, type-erasing, non-owning reference to a callable.
Definition: STLExtras.h:168
llvm::InlineAsm
Definition: InlineAsm.h:31
llvm::DenseSet
Implements a dense probed hash-table based set.
Definition: DenseSet.h:268
llvm::ConstantExpr::getTrunc
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2095
ClMatchAllTag
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
BasicBlock.h
llvm::cl::opt
Definition: CommandLine.h:1432
llvm::GlobalObject::hasSection
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:104
llvm::RISCVFenceField::O
@ O
Definition: RISCVBaseInfo.h:197
llvm::IRBuilderBase::CreateBitCast
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2117
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:304
llvm::GlobalValue
Definition: GlobalValue.h:44
llvm::GlobalVariable::getInitializer
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
Definition: GlobalVariable.h:136
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::HWASanAccessInfo::AccessSizeShift
@ AccessSizeShift
Definition: HWAddressSanitizer.h:60
uint64_t
llvm::GlobalValue::getVisibility
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:229
llvm::StackSafetyGlobalAnalysis
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
Definition: StackSafetyAnalysis.h:128
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:578
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::MD5
Definition: MD5.h:41
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
PromoteMemToReg.h
llvm::DenseMap
Definition: DenseMap.h:714
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::GlobalObject::setComdat
void setComdat(Comdat *C)
Definition: GlobalObject.h:125
llvm::IRBuilderBase::getInt8PtrTy
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Definition: IRBuilder.h:561
StringExtras.h
getExcludedGlobals
static DenseSet< GlobalVariable * > getExcludedGlobals(Module &M)
Definition: HWAddressSanitizer.cpp:1723
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:576
ClInstrumentReads
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
ClMappingOffset
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
llvm::IRBuilderBase::CreateAdd
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1212
llvm::MDString::get
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:473
llvm::ConstantStruct::getAnon
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition: Constants.h:462
llvm::Function::Create
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:139
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::HWAddressSanitizerPass::run
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
Definition: HWAddressSanitizer.cpp:489
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::AllocaInst::getAlignment
uint64_t getAlignment() const
Definition: Instructions.h:129
llvm::ELF::NT_LLVM_HWASAN_GLOBALS
@ NT_LLVM_HWASAN_GLOBALS
Definition: ELF.h:1517
llvm::GlobalValue::isThreadLocal
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:244
llvm::ArrayType::get
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:640
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
llvm::MDNode
Metadata node.
Definition: Metadata.h:906
ClGlobals
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false), cl::ZeroOrMore)
ClMemoryAccessCallbackPrefix
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
kHwasanInitName
const char kHwasanInitName[]
Definition: HWAddressSanitizer.cpp:66
Triple.h
llvm::HWAddressSanitizerOptions::Recover
bool Recover
Definition: HWAddressSanitizer.h:29
CFG.h
llvm::NVPTXISD::Dummy
@ Dummy
Definition: NVPTXISelLowering.h:60
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::MapVector::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: MapVector.h:117
DataLayout.h
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::Triple::aarch64_be
@ aarch64_be
Definition: Triple.h:53
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
InstVisitor.h
llvm::Instruction::setSuccessor
void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
Definition: Instruction.cpp:789
llvm::PostDominatorTree
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
Definition: PostDominators.h:28
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
ClInstrumentByval
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
llvm::ConstantDataArray::get
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition: Constants.h:691
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
llvm::BasicBlock::Create
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:100
uint32_t
ClRecordStackHistory
static cl::opt< bool > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations " "in a thread-local ring buffer"), cl::Hidden, cl::init(true))
llvm::AllocaInst::isArrayAllocation
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
Definition: Instructions.cpp:1388
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::IRBuilderBase::CreateICmpUGE
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2243
ClInstrumentWrites
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:175
llvm::Init
Definition: Record.h:271
llvm::GlobalObject::getAlignment
uint64_t getAlignment() const
FIXME: Remove this function once transition to Align is over.
Definition: GlobalObject.h:71
llvm::IRBuilderBase::CreateConstGEP1_32
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1861
llvm::NamedMDNode::operands
iterator_range< op_iterator > operands()
Definition: Metadata.h:1517
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:726
llvm::IRBuilderBase::CreateGEP
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1799
llvm::cl::Optional
@ Optional
Definition: CommandLine.h:119
llvm::IRBuilderBase::CreateTrunc
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2021
llvm::FunctionCallee::getCallee
Value * getCallee()
Definition: DerivedTypes.h:184
llvm::ConstantInt::getZExtValue
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:142
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::empty
LLVM_NODISCARD bool empty() const
Definition: DenseMap.h:97
llvm::MD5::MD5Result
Definition: MD5.h:43
Attributes.h
runOnFunction
static bool runOnFunction(Function &F, bool PostInlining)
Definition: EntryExitInstrumenter.cpp:69
llvm::MapVector::empty
bool empty() const
Definition: MapVector.h:79
Constant.h
llvm::Type::getInt64Ty
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:242
getPointerOperandIndex
static unsigned getPointerOperandIndex(Instruction *I)
Definition: HWAddressSanitizer.cpp:852
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:52
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:348
llvm::GlobalAlias::create
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:480
llvm::MetadataAsValue::get
static MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:106
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:161
llvm::ConstantExpr::getAdd
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2690
kHwasanNoteName
const char kHwasanNoteName[]
Definition: HWAddressSanitizer.cpp:65
llvm::IRBuilderBase::GetInsertBlock
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:178
llvm::TypeSize
Definition: TypeSize.h:417
Casting.h
Function.h
ClUARRetagToZero
static cl::opt< bool > ClUARRetagToZero("hwasan-uar-retag-to-zero", cl::desc("Clear alloca tags before returning from the function to allow " "non-instrumented and instrumented function calls mix. When set " "to false, allocas are retagged before returning from the " "function to detect use after return."), cl::Hidden, cl::init(true))
ClInstrumentLandingPads
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false), cl::ZeroOrMore)
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:776
llvm::HWASanAccessInfo::RuntimeMask
@ RuntimeMask
Definition: HWAddressSanitizer.h:68
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::StackSafetyGlobalInfoWrapperPass
This pass performs the global (interprocedural) stack safety analysis (legacy pass manager).
Definition: StackSafetyAnalysis.h:150
llvm::MDBuilder
Definition: MDBuilder.h:35
llvm::GlobalValue::getAddressSpace
unsigned getAddressSpace() const
Definition: Globals.cpp:112
llvm::GlobalValue::ExternalLinkage
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:48
llvm::AllocaInst::setAlignment
void setAlignment(Align Align)
Definition: Instructions.h:124
llvm::HWAddressSanitizerPass::printPipeline
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: HWAddressSanitizer.cpp:513
llvm::Triple::isAndroidVersionLT
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:678
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:252
llvm::Type::getVoidTy
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:224
llvm::StackSafetyGlobalInfo::stackAccessIsSafe
bool stackAccessIsSafe(const Instruction &I) const
Definition: StackSafetyAnalysis.cpp:906
llvm::GlobalValue::PrivateLinkage
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:56
Instructions.h
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:138
SmallVector.h
llvm::GlobalVariable::isConstant
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
Definition: GlobalVariable.h:153
llvm::IRBuilderBase::CreateLShr
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1322
Dominators.h
ModuleUtils.h
N
#define N
llvm::IRBuilderBase::CreateShl
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1301
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
kHwasanModuleCtorName
const char kHwasanModuleCtorName[]
Definition: HWAddressSanitizer.cpp:64
llvm::GlobalValue::isDeclarationForLinker
bool isDeclarationForLinker() const
Definition: GlobalValue.h:540
llvm::FunctionCallee
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:165
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
TypeSizeToSizeIndex
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Definition: HWAddressSanitizer.cpp:865
llvm::GlobalValue::getType
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:271
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:401
DerivedTypes.h
llvm::StackSafetyGlobalInfo
Definition: StackSafetyAnalysis.h:58
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
llvm::GlobalValue::getValueType
Type * getValueType() const
Definition: GlobalValue.h:273
llvm::HexStyle::Asm
@ Asm
0ffh
Definition: MCInstPrinter.h:34
llvm::InnerAnalysisManagerProxy
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:940
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
ClMaxLifetimes
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1475
llvm::ConstantInt::isOne
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
Definition: Constants.h:200
llvm::IRBuilderBase::CreateICmpUGT
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2239
llvm::IRBuilderBase::getVoidTy
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:556
llvm::PostDominatorTreeAnalysis
Analysis pass which computes a PostDominatorTree.
Definition: PostDominators.h:47
llvm::Value::replaceUsesWithIf
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:540
ClEnableKhwasan
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
llvm::IRBuilderBase::CreateICmpNE
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2235
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
llvm::HWASanAccessInfo::CompileKernelShift
@ CompileKernelShift
Definition: HWAddressSanitizer.h:65
LLVMContext.h
llvm::SplitBlockAndInsertIfThen
Instruction * SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore, bool Unreachable, MDNode *BranchWeights, DominatorTree *DT, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
Definition: BasicBlockUtils.cpp:1418
ClGenerateTagsWithCalls
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:62
llvm::appendToGlobalCtors
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:66
llvm::cl::desc
Definition: CommandLine.h:412
ClInstrumentStack
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
raw_ostream.h
ClRecover
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
llvm::GlobalValue::InitialExecTLSModel
@ InitialExecTLSModel
Definition: GlobalValue.h:182
BasicBlockUtils.h
llvm::GlobalValue::LinkOnceODRLinkage
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:51
llvm::isPowerOf2_64
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:496
Value.h
ClInstrumentMemIntrinsics
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
ClUseAfterScope
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(false))
InitializePasses.h
ClWithTls
static cl::opt< bool > ClWithTls("hwasan-with-tls", cl::desc("Access dynamic shadow through an thread-local pointer on " "platforms that support this"), cl::Hidden, cl::init(true))
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:521
Debug.h
llvm::Triple::aarch64
@ aarch64
Definition: Triple.h:52
ClUseStackSafety
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2395
llvm::isAllocaPromotable
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
Definition: PromoteMemoryToRegister.cpp:64
llvm::findAllocaForValue
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
Definition: ValueTracking.cpp:4525
kHwasanShadowMemoryDynamicAddress
const char kHwasanShadowMemoryDynamicAddress[]
Definition: HWAddressSanitizer.cpp:69
llvm::FunctionType
Class to represent function types.
Definition: DerivedTypes.h:103
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37
llvm::DataLayout::getTypeAllocSize
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:503