LLVM 9.0.0svn
SafeStack.cpp
1 //===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10 // and the unsafe stack (explicitly allocated and managed through the runtime
11 // support library).
12 //
13 // http://clang.llvm.org/docs/SafeStack.html
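// The pass runs only on functions that carry the `safestack` attribute, which
// clang attaches when compiling with -fsanitize=safe-stack; all other
// functions are left untouched.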
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "SafeStackColoring.h"
18 #include "SafeStackLayout.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BranchProbabilityInfo.h"
26 #include "llvm/Analysis/InlineCost.h"
27 #include "llvm/Analysis/LoopInfo.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
30 #include "llvm/Analysis/TargetLibraryInfo.h"
31 #include "llvm/CodeGen/TargetLowering.h"
32 #include "llvm/CodeGen/TargetPassConfig.h"
33 #include "llvm/CodeGen/TargetSubtargetInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/ConstantRange.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DIBuilder.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/InstIterator.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/MDBuilder.h"
52 #include "llvm/IR/Module.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Use.h"
55 #include "llvm/IR/User.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/MathExtras.h"
62 #include "llvm/Target/TargetMachine.h"
63 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
64 #include "llvm/Transforms/Utils/Cloning.h"
65 #include "llvm/Transforms/Utils/Local.h"
66 #include <algorithm>
67 #include <cassert>
68 #include <cstdint>
69 #include <string>
70 #include <utility>
71 
72 using namespace llvm;
73 using namespace llvm::safestack;
74 
75 #define DEBUG_TYPE "safe-stack"
76 
77 namespace llvm {
78 
79 STATISTIC(NumFunctions, "Total number of functions");
80 STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
81 STATISTIC(NumUnsafeStackRestorePointsFunctions,
82  "Number of functions that use setjmp or exceptions");
83 
84 STATISTIC(NumAllocas, "Total number of allocas");
85 STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
86 STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
87 STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
88 STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
89 
90 } // namespace llvm
91 
92 /// Use __safestack_pointer_address even if the platform has a faster way of
93 /// accessing the safe stack pointer.
94 static cl::opt<bool>
95  SafeStackUsePointerAddress("safestack-use-pointer-address",
96  cl::init(false), cl::Hidden);
97 
98 
99 namespace {
100 
101 /// Rewrite an SCEV expression for a memory access address to an expression that
102 /// represents the offset from the given alloca.
103 ///
104 /// The implementation simply replaces all mentions of the alloca with zero.
105 class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> {
106  const Value *AllocaPtr;
107 
108 public:
109  AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr)
110  : SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {}
111 
112  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
113  if (Expr->getValue() == AllocaPtr)
114  return SE.getZero(Expr->getType());
115  return Expr;
116  }
117 };
118 
119 /// The SafeStack pass splits the stack of each function into the safe
120 /// stack, which is only accessed through memory safe dereferences (as
121 /// determined statically), and the unsafe stack, which contains all
122 /// local variables that are accessed in ways that we can't prove to
123 /// be safe.
124 class SafeStack {
125  Function &F;
126  const TargetLoweringBase &TL;
127  const DataLayout &DL;
128  ScalarEvolution &SE;
129 
130  Type *StackPtrTy;
131  Type *IntPtrTy;
132  Type *Int32Ty;
133  Type *Int8Ty;
134 
135  Value *UnsafeStackPtr = nullptr;
136 
137  /// Unsafe stack alignment. Each stack frame must ensure that the stack is
138  /// aligned to this value. We need to re-align the unsafe stack if the
139  /// alignment of any object on the stack exceeds this value.
140  ///
141  /// 16 seems like a reasonable upper bound on the alignment of objects that we
142  /// might expect to appear on the stack on most common targets.
143  enum { StackAlignment = 16 };
144 
145  /// Return the value of the stack canary.
146  Value *getStackGuard(IRBuilder<> &IRB, Function &F);
147 
148  /// Load stack guard from the frame and check if it has changed.
149  void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
150  AllocaInst *StackGuardSlot, Value *StackGuard);
151 
152  /// Find all static allocas, dynamic allocas, return instructions and
153  /// stack restore points (exception unwind blocks and setjmp calls) in the
154  /// given function and append them to the respective vectors.
155  void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
156  SmallVectorImpl<AllocaInst *> &DynamicAllocas,
157  SmallVectorImpl<Argument *> &ByValArguments,
158  SmallVectorImpl<ReturnInst *> &Returns,
159  SmallVectorImpl<Instruction *> &StackRestorePoints);
160 
161  /// Calculate the allocation size of a given alloca. Returns 0 if the
162  /// size cannot be statically determined.
163  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
164 
165  /// Allocate space for all static allocas in \p StaticAllocas,
166  /// replace allocas with pointers into the unsafe stack and generate code to
167  /// restore the stack pointer before all return instructions in \p Returns.
168  ///
169  /// \returns A pointer to the top of the unsafe stack after all unsafe static
170  /// allocas are allocated.
171  Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
172  ArrayRef<AllocaInst *> StaticAllocas,
173  ArrayRef<Argument *> ByValArguments,
174  ArrayRef<ReturnInst *> Returns,
175  Instruction *BasePointer,
176  AllocaInst *StackGuardSlot);
177 
178  /// Generate code to restore the stack after all stack restore points
179  /// in \p StackRestorePoints.
180  ///
181  /// \returns A local variable in which to maintain the dynamic top of the
182  /// unsafe stack if needed.
183  AllocaInst *
184  createStackRestorePoints(IRBuilder<> &IRB, Function &F,
185  ArrayRef<Instruction *> StackRestorePoints,
186  Value *StaticTop, bool NeedDynamicTop);
187 
188  /// Replace all allocas in \p DynamicAllocas with code to allocate
189  /// space dynamically on the unsafe stack and store the dynamic unsafe stack
190  /// top to \p DynamicTop if non-null.
191  void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
192  AllocaInst *DynamicTop,
193  ArrayRef<AllocaInst *> DynamicAllocas);
194 
195  bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);
196 
197  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
198  const Value *AllocaPtr, uint64_t AllocaSize);
199  bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
200  uint64_t AllocaSize);
201 
202  bool ShouldInlinePointerAddress(CallSite &CS);
203  void TryInlinePointerAddress();
204 
205 public:
206  SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL,
207  ScalarEvolution &SE)
208  : F(F), TL(TL), DL(DL), SE(SE),
209  StackPtrTy(Type::getInt8PtrTy(F.getContext())),
210  IntPtrTy(DL.getIntPtrType(F.getContext())),
211  Int32Ty(Type::getInt32Ty(F.getContext())),
212  Int8Ty(Type::getInt8Ty(F.getContext())) {}
213 
214  // Run the transformation on the associated function.
215  // Returns whether the function was changed.
216  bool run();
217 };
218 
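// For illustration: a hypothetical `alloca i32, i32 8` reports 4 * 8 = 32
// bytes below, while an alloca whose element count is not a compile-time
// constant reports 0.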
219 uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
220  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
221  if (AI->isArrayAllocation()) {
222  auto C = dyn_cast<ConstantInt>(AI->getArraySize());
223  if (!C)
224  return 0;
225  Size *= C->getZExtValue();
226  }
227  return Size;
228 }
229 
230 bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
231  const Value *AllocaPtr, uint64_t AllocaSize) {
232  AllocaOffsetRewriter Rewriter(SE, AllocaPtr);
233  const SCEV *Expr = Rewriter.visit(SE.getSCEV(Addr));
234 
235  uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
236  ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
237  ConstantRange SizeRange =
238  ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
239  ConstantRange AccessRange = AccessStartRange.add(SizeRange);
240  ConstantRange AllocaRange =
241  ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
242  bool Safe = AllocaRange.contains(AccessRange);
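  // Worked example (hypothetical): for an 8-byte alloca accessed by a 4-byte
  // load whose offset SCEV has unsigned range [0, 4], AccessRange covers
  // bytes [0, 8) and is contained in AllocaRange [0, 8), so the access is
  // safe; if the offset could also be 5, AccessRange grows to [0, 9) and the
  // check fails.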
243 
244  LLVM_DEBUG(
245  dbgs() << "[SafeStack] "
246  << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
247  << *AllocaPtr << "\n"
248  << " Access " << *Addr << "\n"
249  << " SCEV " << *Expr
250  << " U: " << SE.getUnsignedRange(Expr)
251  << ", S: " << SE.getSignedRange(Expr) << "\n"
252  << " Range " << AccessRange << "\n"
253  << " AllocaRange " << AllocaRange << "\n"
254  << " " << (Safe ? "safe" : "unsafe") << "\n");
255 
256  return Safe;
257 }
258 
259 bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
260  const Value *AllocaPtr,
261  uint64_t AllocaSize) {
262  if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
263  if (MTI->getRawSource() != U && MTI->getRawDest() != U)
264  return true;
265  } else {
266  if (MI->getRawDest() != U)
267  return true;
268  }
269 
270  const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
271  // Non-constant size => unsafe. FIXME: try SCEV getRange.
272  if (!Len) return false;
273  return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
274 }
275 
276 /// Check whether a given allocation must be put on the safe
277 /// stack or not. The function analyzes all uses of AI and checks whether it is
278 /// only accessed in a memory safe way (as decided statically).
279 bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
280  // Go through all uses of this alloca and check whether all accesses to the
281  // allocated object are statically known to be memory safe and, hence, the
282  // object can be placed on the safe stack.
283  SmallPtrSet<const Value *, 16> Visited;
284  SmallVector<const Value *, 8> WorkList;
285  WorkList.push_back(AllocaPtr);
286 
287  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
288  while (!WorkList.empty()) {
289  const Value *V = WorkList.pop_back_val();
290  for (const Use &UI : V->uses()) {
291  auto I = cast<const Instruction>(UI.getUser());
292  assert(V == UI.get());
293 
294  switch (I->getOpcode()) {
295  case Instruction::Load:
296  if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
297  AllocaSize))
298  return false;
299  break;
300 
301  case Instruction::VAArg:
302  // "va-arg" from a pointer is safe.
303  break;
304  case Instruction::Store:
305  if (V == I->getOperand(0)) {
306  // Stored the pointer - conservatively assume it may be unsafe.
307  LLVM_DEBUG(dbgs()
308  << "[SafeStack] Unsafe alloca: " << *AllocaPtr
309  << "\n store of address: " << *I << "\n");
310  return false;
311  }
312 
313  if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
314  AllocaPtr, AllocaSize))
315  return false;
316  break;
317 
318  case Instruction::Ret:
319  // Information leak.
320  return false;
321 
322  case Instruction::Call:
323  case Instruction::Invoke: {
324  ImmutableCallSite CS(I);
325 
326  if (I->isLifetimeStartOrEnd())
327  continue;
328 
329  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
330  if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
331  LLVM_DEBUG(dbgs()
332  << "[SafeStack] Unsafe alloca: " << *AllocaPtr
333  << "\n unsafe memintrinsic: " << *I << "\n");
334  return false;
335  }
336  continue;
337  }
338 
339  // LLVM 'nocapture' attribute is only set for arguments whose address
340  // is not stored, passed around, or used in any other non-trivial way.
341  // We assume that passing a pointer to an object as a 'nocapture
342  // readnone' argument is safe.
343  // FIXME: a more precise solution would require an interprocedural
344  // analysis here, which would look at all uses of an argument inside
345  // the function being called.
346  ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
347  for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
348  if (A->get() == V)
349  if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
350  CS.doesNotAccessMemory()))) {
351  LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
352  << "\n unsafe call: " << *I << "\n");
353  return false;
354  }
355  continue;
356  }
357 
358  default:
359  if (Visited.insert(I).second)
360  WorkList.push_back(cast<const Instruction>(I));
361  }
362  }
363  }
364 
365  // All uses of the alloca are safe, we can place it on the safe stack.
366  return true;
367 }
368 
369 Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
370  Value *StackGuardVar = TL.getIRStackGuard(IRB);
371  if (!StackGuardVar)
372  StackGuardVar =
373  F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy);
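  // __stack_chk_guard is the conventional stack-protector global provided by
  // the C runtime; it is only used here when the target does not supply its
  // own guard location via getIRStackGuard.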
374  return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
375 }
376 
377 void SafeStack::findInsts(Function &F,
378  SmallVectorImpl<AllocaInst *> &StaticAllocas,
379  SmallVectorImpl<AllocaInst *> &DynamicAllocas,
380  SmallVectorImpl<Argument *> &ByValArguments,
381  SmallVectorImpl<ReturnInst *> &Returns,
382  SmallVectorImpl<Instruction *> &StackRestorePoints) {
383  for (Instruction &I : instructions(&F)) {
384  if (auto AI = dyn_cast<AllocaInst>(&I)) {
385  ++NumAllocas;
386 
387  uint64_t Size = getStaticAllocaAllocationSize(AI);
388  if (IsSafeStackAlloca(AI, Size))
389  continue;
390 
391  if (AI->isStaticAlloca()) {
392  ++NumUnsafeStaticAllocas;
393  StaticAllocas.push_back(AI);
394  } else {
395  ++NumUnsafeDynamicAllocas;
396  DynamicAllocas.push_back(AI);
397  }
398  } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
399  Returns.push_back(RI);
400  } else if (auto CI = dyn_cast<CallInst>(&I)) {
401  // setjmps require stack restore.
402  if (CI->getCalledFunction() && CI->canReturnTwice())
403  StackRestorePoints.push_back(CI);
404  } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
405  // Exception landing pads require stack restore.
406  StackRestorePoints.push_back(LP);
407  } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
408  if (II->getIntrinsicID() == Intrinsic::gcroot)
409  report_fatal_error(
410  "gcroot intrinsic not compatible with safestack attribute");
411  }
412  }
413  for (Argument &Arg : F.args()) {
414  if (!Arg.hasByValAttr())
415  continue;
416  uint64_t Size =
417  DL.getTypeStoreSize(Arg.getType()->getPointerElementType());
418  if (IsSafeStackAlloca(&Arg, Size))
419  continue;
420 
421  ++NumUnsafeByValArguments;
422  ByValArguments.push_back(&Arg);
423  }
424 }
425 
426 AllocaInst *
427 SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
428  ArrayRef<Instruction *> StackRestorePoints,
429  Value *StaticTop, bool NeedDynamicTop) {
430  assert(StaticTop && "The stack top isn't set.");
431 
432  if (StackRestorePoints.empty())
433  return nullptr;
434 
435  // We need the current value of the shadow stack pointer to restore
436  // after longjmp or exception catching.
437 
438  // FIXME: On some platforms this could be handled by the longjmp/exception
439  // runtime itself.
440 
441  AllocaInst *DynamicTop = nullptr;
442  if (NeedDynamicTop) {
443  // If we also have dynamic allocas, the stack pointer value changes
444  // throughout the function. For now we store it in an alloca.
445  DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
446  "unsafe_stack_dynamic_ptr");
447  IRB.CreateStore(StaticTop, DynamicTop);
448  }
449 
450  // Restore current stack pointer after longjmp/exception catch.
451  for (Instruction *I : StackRestorePoints) {
452  ++NumUnsafeStackRestorePoints;
453 
454  IRB.SetInsertPoint(I->getNextNode());
455  Value *CurrentTop =
456  DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
457  IRB.CreateStore(CurrentTop, UnsafeStackPtr);
458  }
459 
460  return DynamicTop;
461 }
462 
463 void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
464  AllocaInst *StackGuardSlot, Value *StackGuard) {
465  Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
466  Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
467 
468  auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
469  auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false);
470  MDNode *Weights = MDBuilder(F.getContext())
471  .createBranchWeights(SuccessProb.getNumerator(),
472  FailureProb.getNumerator());
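  // Marking the failure edge as highly unlikely lets the backend treat the
  // __stack_chk_fail path as cold and keep it out of the hot code layout.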
473  Instruction *CheckTerm =
474  SplitBlockAndInsertIfThen(Cmp, &RI,
475  /* Unreachable */ true, Weights);
476  IRBuilder<> IRBFail(CheckTerm);
477  // FIXME: respect -fsanitize-trap / -ftrap-function here?
478  FunctionCallee StackChkFail =
479  F.getParent()->getOrInsertFunction("__stack_chk_fail", IRB.getVoidTy());
480  IRBFail.CreateCall(StackChkFail, {});
481 }
482 
483 /// We explicitly compute and set the unsafe stack layout for all unsafe
484 /// static alloca instructions. We save the unsafe "base pointer" in the
485 /// prologue into a local variable and restore it in the epilogue.
486 Value *SafeStack::moveStaticAllocasToUnsafeStack(
487  IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
488  ArrayRef<Argument *> ByValArguments, ArrayRef<ReturnInst *> Returns,
489  Instruction *BasePointer, AllocaInst *StackGuardSlot) {
490  if (StaticAllocas.empty() && ByValArguments.empty())
491  return BasePointer;
492 
493  DIBuilder DIB(*F.getParent());
494 
495  StackColoring SSC(F, StaticAllocas);
496  SSC.run();
497  SSC.removeAllMarkers();
498 
499  // Unsafe stack always grows down.
500  StackLayout SSL(StackAlignment);
501  if (StackGuardSlot) {
502  Type *Ty = StackGuardSlot->getAllocatedType();
503  unsigned Align =
504  std::max(DL.getPrefTypeAlignment(Ty), StackGuardSlot->getAlignment());
505  SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
506  Align, SSC.getFullLiveRange());
507  }
508 
509  for (Argument *Arg : ByValArguments) {
510  Type *Ty = Arg->getType()->getPointerElementType();
511  uint64_t Size = DL.getTypeStoreSize(Ty);
512  if (Size == 0)
513  Size = 1; // Don't create zero-sized stack objects.
514 
515  // Ensure the object is properly aligned.
516  unsigned Align = std::max((unsigned)DL.getPrefTypeAlignment(Ty),
517  Arg->getParamAlignment());
518  SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
519  }
520 
521  for (AllocaInst *AI : StaticAllocas) {
522  Type *Ty = AI->getAllocatedType();
523  uint64_t Size = getStaticAllocaAllocationSize(AI);
524  if (Size == 0)
525  Size = 1; // Don't create zero-sized stack objects.
526 
527  // Ensure the object is properly aligned.
528  unsigned Align =
529  std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment());
530 
531  SSL.addObject(AI, Size, Align, SSC.getLiveRange(AI));
532  }
533 
534  SSL.computeLayout();
535  unsigned FrameAlignment = SSL.getFrameAlignment();
536 
537  // FIXME: tell SSL that we start at a less-than-MaxAlignment aligned location
538  // (AlignmentSkew).
539  if (FrameAlignment > StackAlignment) {
540  // Re-align the base pointer according to the max requested alignment.
541  assert(isPowerOf2_32(FrameAlignment));
542  IRB.SetInsertPoint(BasePointer->getNextNode());
543  BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
544  IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
545  ConstantInt::get(IntPtrTy, ~uint64_t(FrameAlignment - 1))),
546  StackPtrTy));
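  // Example: with FrameAlignment == 32 the mask is ~31, which rounds the base
  // pointer down to a 32-byte boundary; rounding down is correct because the
  // unsafe stack grows towards lower addresses.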
547  }
548 
549  IRB.SetInsertPoint(BasePointer->getNextNode());
550 
551  if (StackGuardSlot) {
552  unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
553  Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
554  ConstantInt::get(Int32Ty, -Offset));
555  Value *NewAI =
556  IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");
557 
558  // Replace the alloca with the new location.
559  StackGuardSlot->replaceAllUsesWith(NewAI);
560  StackGuardSlot->eraseFromParent();
561  }
562 
563  for (Argument *Arg : ByValArguments) {
564  unsigned Offset = SSL.getObjectOffset(Arg);
565  unsigned Align = SSL.getObjectAlignment(Arg);
566  Type *Ty = Arg->getType()->getPointerElementType();
567 
568  uint64_t Size = DL.getTypeStoreSize(Ty);
569  if (Size == 0)
570  Size = 1; // Don't create zero-sized stack objects.
571 
572  Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
573  ConstantInt::get(Int32Ty, -Offset));
574  Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
575  Arg->getName() + ".unsafe-byval");
576 
577  // Replace the alloca with the new location.
578  replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB,
579  DIExpression::NoDeref, -Offset, DIExpression::NoDeref);
580  Arg->replaceAllUsesWith(NewArg);
581  IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
582  IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlignment(), Size);
583  }
584 
585  // Allocate space for every unsafe static AllocaInst on the unsafe stack.
586  for (AllocaInst *AI : StaticAllocas) {
587  IRB.SetInsertPoint(AI);
588  unsigned Offset = SSL.getObjectOffset(AI);
589 
590  uint64_t Size = getStaticAllocaAllocationSize(AI);
591  if (Size == 0)
592  Size = 1; // Don't create zero-sized stack objects.
593 
594  replaceDbgDeclareForAlloca(AI, BasePointer, DIB, DIExpression::NoDeref,
595  -Offset, DIExpression::NoDeref);
596  replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
597 
598  // Replace uses of the alloca with the new location.
599  // Insert address calculation close to each use to work around PR27844.
600  std::string Name = std::string(AI->getName()) + ".unsafe";
601  while (!AI->use_empty()) {
602  Use &U = *AI->use_begin();
603  Instruction *User = cast<Instruction>(U.getUser());
604 
605  Instruction *InsertBefore;
606  if (auto *PHI = dyn_cast<PHINode>(User))
607  InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
608  else
609  InsertBefore = User;
610 
611  IRBuilder<> IRBUser(InsertBefore);
612  Value *Off = IRBUser.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
613  ConstantInt::get(Int32Ty, -Offset));
614  Value *Replacement = IRBUser.CreateBitCast(Off, AI->getType(), Name);
615 
616  if (auto *PHI = dyn_cast<PHINode>(User)) {
617  // PHI nodes may have multiple incoming edges from the same BB (why??),
618  // all must be updated at once with the same incoming value.
619  auto *BB = PHI->getIncomingBlock(U);
620  for (unsigned I = 0; I < PHI->getNumIncomingValues(); ++I)
621  if (PHI->getIncomingBlock(I) == BB)
622  PHI->setIncomingValue(I, Replacement);
623  } else {
624  U.set(Replacement);
625  }
626  }
627 
628  AI->eraseFromParent();
629  }
630 
631  // Re-align BasePointer so that our callees would see it aligned as
632  // expected.
633  // FIXME: no need to update BasePointer in leaf functions.
634  unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
635 
636  // Update shadow stack pointer in the function epilogue.
637  IRB.SetInsertPoint(BasePointer->getNextNode());
638 
639  Value *StaticTop =
640  IRB.CreateGEP(Int8Ty, BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
641  "unsafe_stack_static_top");
642  IRB.CreateStore(StaticTop, UnsafeStackPtr);
643  return StaticTop;
644 }
645 
646 void SafeStack::moveDynamicAllocasToUnsafeStack(
647  Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
648  ArrayRef<AllocaInst *> DynamicAllocas) {
649  DIBuilder DIB(*F.getParent());
650 
651  for (AllocaInst *AI : DynamicAllocas) {
652  IRBuilder<> IRB(AI);
653 
654  // Compute the new SP value (after AI).
655  Value *ArraySize = AI->getArraySize();
656  if (ArraySize->getType() != IntPtrTy)
657  ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
658 
659  Type *Ty = AI->getAllocatedType();
660  uint64_t TySize = DL.getTypeAllocSize(Ty);
661  Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
662 
663  Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
664  IntPtrTy);
665  SP = IRB.CreateSub(SP, Size);
666 
667  // Align the SP value to satisfy the AllocaInst, type and stack alignments.
668  unsigned Align = std::max(
669  std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment()),
670  (unsigned)StackAlignment);
671 
672  assert(isPowerOf2_32(Align));
673  Value *NewTop = IRB.CreateIntToPtr(
674  IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
675  StackPtrTy);
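  // Clearing the low bits rounds the new top down to the requested alignment,
  // over-allocating by at most Align - 1 bytes.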
676 
677  // Save the stack pointer.
678  IRB.CreateStore(NewTop, UnsafeStackPtr);
679  if (DynamicTop)
680  IRB.CreateStore(NewTop, DynamicTop);
681 
682  Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
683  if (AI->hasName() && isa<Instruction>(NewAI))
684  NewAI->takeName(AI);
685 
686  replaceDbgDeclareForAlloca(AI, NewAI, DIB, DIExpression::NoDeref, 0,
687  DIExpression::NoDeref);
688  AI->replaceAllUsesWith(NewAI);
689  AI->eraseFromParent();
690  }
691 
692  if (!DynamicAllocas.empty()) {
693  // Now go through the instructions again, replacing stacksave/stackrestore.
694  for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
695  Instruction *I = &*(It++);
696  auto II = dyn_cast<IntrinsicInst>(I);
697  if (!II)
698  continue;
699 
700  if (II->getIntrinsicID() == Intrinsic::stacksave) {
701  IRBuilder<> IRB(II);
702  Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
703  LI->takeName(II);
704  II->replaceAllUsesWith(LI);
705  II->eraseFromParent();
706  } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
707  IRBuilder<> IRB(II);
708  Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
709  SI->takeName(II);
710  assert(II->use_empty());
711  II->eraseFromParent();
712  }
713  }
714  }
715 }
716 
717 bool SafeStack::ShouldInlinePointerAddress(CallSite &CS) {
718  Function *Callee = CS.getCalledFunction();
719  if (CS.hasFnAttr(Attribute::AlwaysInline) && isInlineViable(*Callee))
720  return true;
721  if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
722  CS.isNoInline())
723  return false;
724  return true;
725 }
726 
727 void SafeStack::TryInlinePointerAddress() {
728  if (!isa<CallInst>(UnsafeStackPtr))
729  return;
730 
731  if (F.hasOptNone())
732  return;
733 
734  CallSite CS(UnsafeStackPtr);
735  Function *Callee = CS.getCalledFunction();
736  if (!Callee || Callee->isDeclaration())
737  return;
738 
739  if (!ShouldInlinePointerAddress(CS))
740  return;
741 
742  InlineFunctionInfo IFI;
743  InlineFunction(CS, IFI);
744 }
745 
746 bool SafeStack::run() {
747  assert(F.hasFnAttribute(Attribute::SafeStack) &&
748  "Can't run SafeStack on a function without the attribute");
749  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");
750 
751  ++NumFunctions;
752 
753  SmallVector<AllocaInst *, 16> StaticAllocas;
754  SmallVector<AllocaInst *, 4> DynamicAllocas;
755  SmallVector<Argument *, 4> ByValArguments;
756  SmallVector<ReturnInst *, 4> Returns;
757 
758  // Collect all points where the stack gets unwound and needs to be restored.
759  // This is only necessary because the runtime (setjmp and unwind code) is
760  // not aware of the unsafe stack and won't unwind/restore it properly.
761  // To work around this problem without changing the runtime, we insert
762  // instrumentation to restore the unsafe stack pointer when necessary.
763  SmallVector<Instruction *, 4> StackRestorePoints;
764 
765  // Find all static and dynamic alloca instructions that must be moved to the
766  // unsafe stack, all return instructions and stack restore points.
767  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
768  StackRestorePoints);
769 
770  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
771  ByValArguments.empty() && StackRestorePoints.empty())
772  return false; // Nothing to do in this function.
773 
774  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
775  !ByValArguments.empty())
776  ++NumUnsafeStackFunctions; // This function has the unsafe stack.
777 
778  if (!StackRestorePoints.empty())
779  ++NumUnsafeStackRestorePointsFunctions;
780 
781  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
782  // Calls must always have a debug location, or else inlining breaks. So
783  // we explicitly set an artificial debug location here.
784  if (DISubprogram *SP = F.getSubprogram())
785  IRB.SetCurrentDebugLocation(DebugLoc::get(SP->getScopeLine(), 0, SP));
786  if (SafeStackUsePointerAddress) {
787  FunctionCallee Fn = F.getParent()->getOrInsertFunction(
788  "__safestack_pointer_address", StackPtrTy->getPointerTo(0));
789  UnsafeStackPtr = IRB.CreateCall(Fn);
790  } else {
791  UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);
792  }
793 
794  // Load the current stack pointer (we'll also use it as a base pointer).
795  // FIXME: use a dedicated register for it?
796  Instruction *BasePointer =
797  IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
798  assert(BasePointer->getType() == StackPtrTy);
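  // From here on, objects in this function's unsafe frame are addressed at
  // negative offsets from BasePointer, since the unsafe stack grows down.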
799 
800  AllocaInst *StackGuardSlot = nullptr;
801  // FIXME: implement weaker forms of stack protector.
802  if (F.hasFnAttribute(Attribute::StackProtect) ||
803  F.hasFnAttribute(Attribute::StackProtectStrong) ||
804  F.hasFnAttribute(Attribute::StackProtectReq)) {
805  Value *StackGuard = getStackGuard(IRB, F);
806  StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
807  IRB.CreateStore(StackGuard, StackGuardSlot);
808 
809  for (ReturnInst *RI : Returns) {
810  IRBuilder<> IRBRet(RI);
811  checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
812  }
813  }
814 
815  // The top of the unsafe stack after all unsafe static allocas are
816  // allocated.
817  Value *StaticTop =
818  moveStaticAllocasToUnsafeStack(IRB, F, StaticAllocas, ByValArguments,
819  Returns, BasePointer, StackGuardSlot);
820 
821  // Safe stack object that stores the current unsafe stack top. It is updated
822  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
823  // This is only needed if we need to restore stack pointer after longjmp
824  // or exceptions, and we have dynamic allocations.
825  // FIXME: a better alternative might be to store the unsafe stack pointer
826  // before setjmp / invoke instructions.
827  AllocaInst *DynamicTop = createStackRestorePoints(
828  IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
829 
830  // Handle dynamic allocas.
831  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
832  DynamicAllocas);
833 
834  // Restore the unsafe stack pointer before each return.
835  for (ReturnInst *RI : Returns) {
836  IRB.SetInsertPoint(RI);
837  IRB.CreateStore(BasePointer, UnsafeStackPtr);
838  }
839 
840  TryInlinePointerAddress();
841 
842  LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
843  return true;
844 }
845 
846 class SafeStackLegacyPass : public FunctionPass {
847  const TargetMachine *TM = nullptr;
848 
849 public:
850  static char ID; // Pass identification, replacement for typeid.
851 
852  SafeStackLegacyPass() : FunctionPass(ID) {
853  initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
854  }
855 
856  void getAnalysisUsage(AnalysisUsage &AU) const override {
857  AU.addRequired<TargetPassConfig>();
858  AU.addRequired<TargetLibraryInfoWrapperPass>();
859  AU.addRequired<AssumptionCacheTracker>();
860  }
861 
862  bool runOnFunction(Function &F) override {
863  LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
864 
865  if (!F.hasFnAttribute(Attribute::SafeStack)) {
866  LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
867  " for this function\n");
868  return false;
869  }
870 
871  if (F.isDeclaration()) {
872  LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
873  " is not available\n");
874  return false;
875  }
876 
877  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
878  auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
879  if (!TL)
880  report_fatal_error("TargetLowering instance is required");
881 
882  auto *DL = &F.getParent()->getDataLayout();
883  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
884  auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
885 
886  // Compute DT and LI only for functions that have the attribute.
887  // This is only useful because the legacy pass manager doesn't let us
888  // compute analyses lazily.
889  // In the backend pipeline, nothing preserves DT before SafeStack, so we
890  // would otherwise always compute it wastefully, even if there is no
891  // function with the safestack attribute.
892  DominatorTree DT(F);
893  LoopInfo LI(DT);
894 
895  ScalarEvolution SE(F, TLI, ACT, DT, LI);
896 
897  return SafeStack(F, *TL, *DL, SE).run();
898  }
899 };
900 
901 } // end anonymous namespace
902 
903 char SafeStackLegacyPass::ID = 0;
904 
905 INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
906  "Safe Stack instrumentation pass", false, false)
907 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
908 INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
909  "Safe Stack instrumentation pass", false, false)
910 
911 FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }