// Extracted from LLVM 10.0.0svn documentation dump.
// AArch64StackTagging.cpp
1 //===- AArch64StackTagging.cpp - Stack tagging in IR --===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
//===----------------------------------------------------------------------===//
10 
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <iterator>
#include <map>
#include <utility>
53 
54 using namespace llvm;
55 
56 #define DEBUG_TYPE "stack-tagging"
57 
59  "stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore,
60  cl::desc("merge stack variable initializers with tagging when possible"));
61 
// Maximum number of non-debug instructions scanned forward from an alloca
// when collecting initializer stores/memsets to merge into the tagging
// sequence (see collectInitializers).
static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
                                     cl::init(40), cl::Hidden);
64 
65 static constexpr unsigned kTagGranuleSize = 16;
66 
67 namespace {
68 
// Collects the initializer stores/memsets that cover a single alloca and
// re-emits them combined with memory tagging: zero parts become
// llvm.aarch64.settag.zero calls, explicitly initialized 16-byte granules
// become llvm.aarch64.stgp (tag + store pair), and untouched parts are tagged
// with llvm.aarch64.settag only.
class InitializerBuilder {
  uint64_t Size;          // Size of the allocation being initialized, bytes.
  const DataLayout *DL;
  Value *BasePtr;         // Pointer to the start of the allocation.
  Function *SetTagFn;     // llvm.aarch64.settag
  Function *SetTagZeroFn; // llvm.aarch64.settag.zero
  Function *StgpFn;       // llvm.aarch64.stgp

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End;
    Instruction *Inst;
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  // Record the half-open byte range [Start, End) as covered by Inst, keeping
  // Ranges sorted. Returns false (recording nothing) if the range overlaps a
  // previously added one.
  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    auto I = std::lower_bound(
        Ranges.begin(), Ranges.end(), Start,
        [](const Range &LHS, uint64_t RHS) { return LHS.End <= RHS; });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap - bail.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  // Fold a plain store at byte Offset into the combined initializer.
  // Returns false if it overlaps an already-recorded initializer.
  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  // Fold a constant-length, constant-value memset into the combined
  // initializer. Returns false on overlap.
  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }

  // Record the effect of memset(V) over [Start, End) into the 8-byte slots of
  // Out[]. The byte value is replicated via multiplication by 0x0101...01;
  // partial slots at either edge are masked off. Slots are OR-ed with any
  // existing value — ranges never overlap, so the affected bits are disjoint.
  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do for memset(0).
    if (V->isZero())
      return;
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits; // clear bytes before Start
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits; // clear bytes at/after End
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }

  // Record the effect of storing StoredValue over [Start, End): the value is
  // flattened to an integer, sliced into 8-byte pieces and OR-ed into Out[].
  // NOTE(review): the shift-based slicing assumes a little-endian layout;
  // callers only enable merging on little-endian targets.
  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }

  // Emit the combined tagging + initialization sequence at IRB's insertion
  // point, then erase the original initializer instructions.
  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Look through 8-byte initializer list 16 bytes at a time;
    // If one of the two 8-byte halfs is non-zero non-undef, emit STGP.
    // Otherwise, emit zeroes up to next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zero.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

  // Tag [Offset, Offset + Size) and zero-fill it via llvm.aarch64.settag.zero.
  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  // Tag [Offset, Offset + Size) without defining its contents
  // (llvm.aarch64.settag only).
  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  // Tag the 16-byte granule at Offset and store the value pair (A, B) into it
  // via llvm.aarch64.stgp.
  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << " " << *A << "\n " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }

  // Convert a stored value to an integer of the same store size: pointer
  // vectors become integer vectors first, then everything is bit- or
  // pointer-cast to an iN where N is the store size in bits.
  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        Type *NewTy = VectorType::get(IntegerType::get(Ctx, EltSize),
                                      VecTy->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};
266 
267 class AArch64StackTagging : public FunctionPass {
268  struct AllocaInfo {
269  AllocaInst *AI;
270  SmallVector<IntrinsicInst *, 2> LifetimeStart;
272  SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
273  int Tag; // -1 for non-tagged allocations
274  };
275 
276  bool MergeInit;
277 
278 public:
279  static char ID; // Pass ID, replacement for typeid
280 
281  AArch64StackTagging(bool MergeInit = true)
282  : FunctionPass(ID),
283  MergeInit(ClMergeInit.getNumOccurrences() > 0 ? ClMergeInit
284  : MergeInit) {
286  }
287 
288  bool isInterestingAlloca(const AllocaInst &AI);
289  void alignAndPadAlloca(AllocaInfo &Info);
290 
291  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
292  uint64_t Size);
293  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);
294 
295  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
296  uint64_t Size, InitializerBuilder &IB);
297 
298  Instruction *
299  insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
300  const DominatorTree *DT);
301  bool runOnFunction(Function &F) override;
302 
303  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }
304 
305 private:
306  Function *F;
307  Function *SetTagFunc;
308  const DataLayout *DL;
309  AAResults *AA;
310 
311  void getAnalysisUsage(AnalysisUsage &AU) const override {
312  AU.setPreservesCFG();
313  if (MergeInit)
315  }
316 };
317 
318 } // end anonymous namespace
319 
// Pass identification: the address of ID serves as the unique pass ID for the
// legacy pass manager.
char AArch64StackTagging::ID = 0;

// Register the pass so it can be referenced by name ("stack-tagging").
INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                      false, false)
INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                    false, false)
326 
328  return new AArch64StackTagging(MergeInit);
329 }
330 
331 Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
332  Value *StartPtr,
333  uint64_t Size,
334  InitializerBuilder &IB) {
335  MemoryLocation AllocaLoc{StartPtr, Size};
336  Instruction *LastInst = StartInst;
337  BasicBlock::iterator BI(StartInst);
338 
339  unsigned Count = 0;
340  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
341  if (!isa<DbgInfoIntrinsic>(*BI))
342  ++Count;
343 
344  if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
345  continue;
346 
347  if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
348  // If the instruction is readnone, ignore it, otherwise bail out. We
349  // don't even allow readonly here because we don't want something like:
350  // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
351  if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
352  break;
353  continue;
354  }
355 
356  if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
357  if (!NextStore->isSimple())
358  break;
359 
360  // Check to see if this store is to a constant offset from the start ptr.
362  isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
363  if (!Offset)
364  break;
365 
366  if (!IB.addStore(*Offset, NextStore, DL))
367  break;
368  LastInst = NextStore;
369  } else {
370  MemSetInst *MSI = cast<MemSetInst>(BI);
371 
372  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
373  break;
374 
375  if (!isa<ConstantInt>(MSI->getValue()))
376  break;
377 
378  // Check to see if this store is to a constant offset from the start ptr.
379  Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
380  if (!Offset)
381  break;
382 
383  if (!IB.addMemSet(*Offset, MSI))
384  break;
385  LastInst = MSI;
386  }
387  }
388  return LastInst;
389 }
390 
391 bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
392  // FIXME: support dynamic allocas
393  bool IsInteresting =
394  AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
395  // alloca() may be called with 0 size, ignore it.
396  AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
397  // inalloca allocas are not treated as static, and we don't want
398  // dynamic alloca instrumentation for them as well.
399  !AI.isUsedWithInAlloca() &&
400  // swifterror allocas are register promoted by ISel
401  !AI.isSwiftError();
402  return IsInteresting;
403 }
404 
405 void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
406  Value *Ptr, uint64_t Size) {
407  auto SetTagZeroFunc =
408  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
409  auto StgpFunc =
410  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);
411 
412  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
413  bool LittleEndian =
414  Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
415  // Current implementation of initializer merging assumes little endianness.
416  if (MergeInit && !F->hasOptNone() && LittleEndian) {
417  LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
418  << ", size = " << Size << "\n");
419  InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
420  }
421 
422  IRBuilder<> IRB(InsertBefore);
423  IB.generate(IRB);
424 }
425 
426 void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
427  uint64_t Size) {
428  IRBuilder<> IRB(InsertBefore);
429  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
430  ConstantInt::get(IRB.getInt64Ty(), Size)});
431 }
432 
433 Instruction *AArch64StackTagging::insertBaseTaggedPointer(
435  const DominatorTree *DT) {
436  BasicBlock *PrologueBB = nullptr;
437  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
438  for (auto &I : Allocas) {
439  const AllocaInfo &Info = I.second;
440  AllocaInst *AI = Info.AI;
441  if (Info.Tag < 0)
442  continue;
443  if (!PrologueBB) {
444  PrologueBB = AI->getParent();
445  continue;
446  }
447  PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
448  }
449  assert(PrologueBB);
450 
451  IRBuilder<> IRB(&PrologueBB->front());
452  Function *IRG_SP =
453  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
454  Instruction *Base =
455  IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
456  Base->setName("basetag");
457  return Base;
458 }
459 
460 void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
461  unsigned NewAlignment = std::max(Info.AI->getAlignment(), kTagGranuleSize);
462  Info.AI->setAlignment(NewAlignment);
463 
464  uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
465  uint64_t AlignedSize = alignTo(Size, kTagGranuleSize);
466  if (Size == AlignedSize)
467  return;
468 
469  // Add padding to the alloca.
470  Type *AllocatedType =
471  Info.AI->isArrayAllocation()
472  ? ArrayType::get(
473  Info.AI->getAllocatedType(),
474  dyn_cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
475  : Info.AI->getAllocatedType();
476  Type *PaddingType =
477  ArrayType::get(Type::getInt8Ty(F->getContext()), AlignedSize - Size);
478  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
479  auto *NewAI = new AllocaInst(
480  TypeWithPadding, Info.AI->getType()->getAddressSpace(), nullptr, "", Info.AI);
481  NewAI->takeName(Info.AI);
482  NewAI->setAlignment(Info.AI->getAlignment());
483  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
484  NewAI->setSwiftError(Info.AI->isSwiftError());
485  NewAI->copyMetadata(*Info.AI);
486 
487  auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
488  Info.AI->replaceAllUsesWith(NewPtr);
489  Info.AI->eraseFromParent();
490  Info.AI = NewAI;
491 }
492 
493 // FIXME: check for MTE extension
495  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
496  return false;
497 
498  F = &Fn;
499  DL = &Fn.getParent()->getDataLayout();
500  if (MergeInit)
501  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
502 
503  MapVector<AllocaInst *, AllocaInfo> Allocas; // need stable iteration order
505  DenseMap<Value *, AllocaInst *> AllocaForValue;
506  SmallVector<Instruction *, 4> UnrecognizedLifetimes;
507 
508  for (auto &BB : *F) {
509  for (BasicBlock::iterator IT = BB.begin(); IT != BB.end(); ++IT) {
510  Instruction *I = &*IT;
511  if (auto *AI = dyn_cast<AllocaInst>(I)) {
512  Allocas[AI].AI = AI;
513  continue;
514  }
515 
516  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(I)) {
517  if (auto *AI =
518  dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation())) {
519  Allocas[AI].DbgVariableIntrinsics.push_back(DVI);
520  }
521  continue;
522  }
523 
524  auto *II = dyn_cast<IntrinsicInst>(I);
525  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
526  II->getIntrinsicID() == Intrinsic::lifetime_end)) {
527  AllocaInst *AI =
528  llvm::findAllocaForValue(II->getArgOperand(1), AllocaForValue);
529  if (!AI) {
530  UnrecognizedLifetimes.push_back(I);
531  continue;
532  }
533  if (II->getIntrinsicID() == Intrinsic::lifetime_start)
534  Allocas[AI].LifetimeStart.push_back(II);
535  else
536  Allocas[AI].LifetimeEnd.push_back(II);
537  }
538 
539  if (isa<ReturnInst>(I) || isa<ResumeInst>(I) || isa<CleanupReturnInst>(I))
540  RetVec.push_back(I);
541  }
542  }
543 
544  if (Allocas.empty())
545  return false;
546 
547  int NextTag = 0;
548  int NumInterestingAllocas = 0;
549  for (auto &I : Allocas) {
550  AllocaInfo &Info = I.second;
551  assert(Info.AI);
552 
553  if (!isInterestingAlloca(*Info.AI)) {
554  Info.Tag = -1;
555  continue;
556  }
557 
558  alignAndPadAlloca(Info);
559  NumInterestingAllocas++;
560  Info.Tag = NextTag;
561  NextTag = (NextTag + 1) % 16;
562  }
563 
564  if (NumInterestingAllocas == 0)
565  return true;
566 
567  SetTagFunc =
568  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
569 
570  // Compute DT only if the function has the attribute, there are more than 1
571  // interesting allocas, and it is not available for free.
572  Instruction *Base;
573  if (NumInterestingAllocas > 1) {
574  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
575  if (DTWP) {
576  Base = insertBaseTaggedPointer(Allocas, &DTWP->getDomTree());
577  } else {
578  DominatorTree DT(*F);
579  Base = insertBaseTaggedPointer(Allocas, &DT);
580  }
581  } else {
582  Base = insertBaseTaggedPointer(Allocas, nullptr);
583  }
584 
585  for (auto &I : Allocas) {
586  const AllocaInfo &Info = I.second;
587  AllocaInst *AI = Info.AI;
588  if (Info.Tag < 0)
589  continue;
590 
591  // Replace alloca with tagp(alloca).
592  IRBuilder<> IRB(Info.AI->getNextNode());
594  F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
595  Instruction *TagPCall =
596  IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
597  ConstantInt::get(IRB.getInt64Ty(), Info.Tag)});
598  if (Info.AI->hasName())
599  TagPCall->setName(Info.AI->getName() + ".tag");
600  Info.AI->replaceAllUsesWith(TagPCall);
601  TagPCall->setOperand(0, Info.AI);
602 
603  if (UnrecognizedLifetimes.empty() && Info.LifetimeStart.size() == 1 &&
604  Info.LifetimeEnd.size() == 1) {
605  IntrinsicInst *Start = Info.LifetimeStart[0];
606  uint64_t Size =
607  dyn_cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
608  Size = alignTo(Size, kTagGranuleSize);
609  tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);
610  untagAlloca(AI, Info.LifetimeEnd[0], Size);
611  } else {
612  uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
613  Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
614  tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
615  for (auto &RI : RetVec) {
616  untagAlloca(AI, RI, Size);
617  }
618  // We may have inserted tag/untag outside of any lifetime interval.
619  // Remove all lifetime intrinsics for this alloca.
620  for (auto &II : Info.LifetimeStart)
621  II->eraseFromParent();
622  for (auto &II : Info.LifetimeEnd)
623  II->eraseFromParent();
624  }
625 
626  // Fixup debug intrinsics to point to the new alloca.
627  for (auto DVI : Info.DbgVariableIntrinsics)
628  DVI->setArgOperand(
629  0,
630  MetadataAsValue::get(F->getContext(), LocalAsMetadata::get(Info.AI)));
631  }
632 
633  // If we have instrumented at least one alloca, all unrecognized lifetime
634  // instrinsics have to go.
635  for (auto &I : UnrecognizedLifetimes)
636  I->eraseFromParent();
637 
638  return true;
639 }
auto lower_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1261
uint64_t CallInst * C
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
const std::string & getTargetTriple() const
Get the target triple which is a string describing the target host.
Definition: Module.h:240
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1735
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
LLVMContext & getContext() const
Definition: IRBuilder.h:128
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition: IRBuilder.h:1888
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:288
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:264
This file contains the declarations for metadata subclasses.
Value * getValue() const
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:135
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:323
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
findNearestCommonDominator - Find nearest common dominator basic block for basic block A and B...
This class wraps the llvm.memset intrinsic.
This class implements a map that also provides access to all stored values in a deterministic order...
Definition: MapVector.h:37
F(f)
Value * getLength() const
#define DEBUG_TYPE
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:274
AArch64 Stack Tagging
Value * getDest() const
This is just like getRawDest, but it strips off any cast instructions (including addrspacecast) that ...
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1241
AnalysisUsage & addRequired()
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:369
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:388
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:196
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:779
FunctionPass * createAArch64StackTaggingPass(bool MergeInit)
bool empty() const
Definition: MapVector.h:79
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:285
static StructType * get(LLVMContext &Context, ArrayRef< Type *> Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:341
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
const T & getValue() const LLVM_LVALUE_FUNCTION
Definition: Optional.h:255
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:124
This class represents a no-op cast from one type to another.
An instruction for storing to memory.
Definition: Instructions.h:320
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:291
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1043
Value * getOperand(unsigned i) const
Definition: User.h:169
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1294
static MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:105
static bool runOnFunction(Function &F, bool PostInlining)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:148
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:223
const Instruction & front() const
Definition: BasicBlock.h:280
Represent the analysis usage information of a pass.
AllocaInst * findAllocaForValue(Value *V, DenseMap< Value *, AllocaInst *> &AllocaForValue)
Finds alloca where the value comes from.
static cl::opt< unsigned > ClScanLimit("stack-tagging-merge-init-scan-limit", cl::init(40), cl::Hidden)
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:284
static cl::opt< bool > ClMergeInit("stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::desc("merge stack variable initializers with tagging when possible"))
A set of register units.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition: IRBuilder.h:396
static LocalAsMetadata * get(Value *Local)
Definition: Metadata.h:435
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Definition: IRBuilder.h:421
bool isVolatile() const
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:105
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:43
Optional< uint64_t > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
Representation for a specific memory location.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:239
Iterator for intrusive lists based on ilist_node.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:638
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:301
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void initializeAArch64StackTaggingPass(PassRegistry &)
Optional< int64_t > isPointerOffset(const Value *Ptr1, const Value *Ptr2, const DataLayout &DL)
If Ptr1 is provably equal to Ptr2 plus a constant offset, return that offset.
Class to represent vector types.
Definition: DerivedTypes.h:427
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:55
static constexpr unsigned kTagGranuleSize
LLVM_NODISCARD bool isNoModRef(const ModRefInfo MRI)
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1207
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:467
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2004
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:602
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2036
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::ZeroOrMore, cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate IT block based on arch"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow deprecated IT based on ARMv8"), clEnumValN(NoRestrictedIT, "arm-no-restrict-it", "Allow IT blocks based on ARMv7")))
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:126
INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging", false, false) INITIALIZE_PASS_END(AArch64StackTagging
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:609
#define I(x, y, z)
Definition: MD5.cpp:58
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:192
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:582
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
uint32_t Size
Definition: Profile.cpp:46
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2223
static void addRange(SmallVectorImpl< ConstantInt *> &EndPoints, ConstantInt *Low, ConstantInt *High)
Definition: Metadata.cpp:967
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:73
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:445
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1228
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size...
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
#define LLVM_DEBUG(X)
Definition: Debug.h:122
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:173
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:43
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:59