File: | lib/CodeGen/SafeStack.cpp |
Warning: | line 592, column 7 Value stored to 'Size' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- SafeStack.cpp - Safe Stack Insertion -------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This pass splits the stack into the safe stack (kept as-is for LLVM backend) |
10 | // and the unsafe stack (explicitly allocated and managed through the runtime |
11 | // support library). |
12 | // |
13 | // http://clang.llvm.org/docs/SafeStack.html |
14 | // |
15 | //===----------------------------------------------------------------------===// |
16 | |
17 | #include "SafeStackColoring.h" |
18 | #include "SafeStackLayout.h" |
19 | #include "llvm/ADT/APInt.h" |
20 | #include "llvm/ADT/ArrayRef.h" |
21 | #include "llvm/ADT/SmallPtrSet.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/Statistic.h" |
24 | #include "llvm/Analysis/AssumptionCache.h" |
25 | #include "llvm/Analysis/BranchProbabilityInfo.h" |
26 | #include "llvm/Analysis/InlineCost.h" |
27 | #include "llvm/Analysis/LoopInfo.h" |
28 | #include "llvm/Analysis/ScalarEvolution.h" |
29 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
30 | #include "llvm/Analysis/TargetLibraryInfo.h" |
31 | #include "llvm/Transforms/Utils/Local.h" |
32 | #include "llvm/CodeGen/TargetLowering.h" |
33 | #include "llvm/CodeGen/TargetPassConfig.h" |
34 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
35 | #include "llvm/IR/Argument.h" |
36 | #include "llvm/IR/Attributes.h" |
37 | #include "llvm/IR/CallSite.h" |
38 | #include "llvm/IR/ConstantRange.h" |
39 | #include "llvm/IR/Constants.h" |
40 | #include "llvm/IR/DIBuilder.h" |
41 | #include "llvm/IR/DataLayout.h" |
42 | #include "llvm/IR/DerivedTypes.h" |
43 | #include "llvm/IR/Dominators.h" |
44 | #include "llvm/IR/Function.h" |
45 | #include "llvm/IR/IRBuilder.h" |
46 | #include "llvm/IR/InstIterator.h" |
47 | #include "llvm/IR/Instruction.h" |
48 | #include "llvm/IR/Instructions.h" |
49 | #include "llvm/IR/IntrinsicInst.h" |
50 | #include "llvm/IR/Intrinsics.h" |
51 | #include "llvm/IR/MDBuilder.h" |
52 | #include "llvm/IR/Module.h" |
53 | #include "llvm/IR/Type.h" |
54 | #include "llvm/IR/Use.h" |
55 | #include "llvm/IR/User.h" |
56 | #include "llvm/IR/Value.h" |
57 | #include "llvm/Pass.h" |
58 | #include "llvm/Support/Casting.h" |
59 | #include "llvm/Support/Debug.h" |
60 | #include "llvm/Support/ErrorHandling.h" |
61 | #include "llvm/Support/MathExtras.h" |
62 | #include "llvm/Support/raw_ostream.h" |
63 | #include "llvm/Target/TargetMachine.h" |
64 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
65 | #include "llvm/Transforms/Utils/Cloning.h" |
66 | #include <algorithm> |
67 | #include <cassert> |
68 | #include <cstdint> |
69 | #include <string> |
70 | #include <utility> |
71 | |
72 | using namespace llvm; |
73 | using namespace llvm::safestack; |
74 | |
75 | #define DEBUG_TYPE"safe-stack" "safe-stack" |
76 | |
77 | namespace llvm { |
78 | |
79 | STATISTIC(NumFunctions, "Total number of functions")static llvm::Statistic NumFunctions = {"safe-stack", "NumFunctions" , "Total number of functions", {0}, {false}}; |
80 | STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack")static llvm::Statistic NumUnsafeStackFunctions = {"safe-stack" , "NumUnsafeStackFunctions", "Number of functions with unsafe stack" , {0}, {false}}; |
81 | STATISTIC(NumUnsafeStackRestorePointsFunctions,static llvm::Statistic NumUnsafeStackRestorePointsFunctions = {"safe-stack", "NumUnsafeStackRestorePointsFunctions", "Number of functions that use setjmp or exceptions" , {0}, {false}} |
82 | "Number of functions that use setjmp or exceptions")static llvm::Statistic NumUnsafeStackRestorePointsFunctions = {"safe-stack", "NumUnsafeStackRestorePointsFunctions", "Number of functions that use setjmp or exceptions" , {0}, {false}}; |
83 | |
84 | STATISTIC(NumAllocas, "Total number of allocas")static llvm::Statistic NumAllocas = {"safe-stack", "NumAllocas" , "Total number of allocas", {0}, {false}}; |
85 | STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas")static llvm::Statistic NumUnsafeStaticAllocas = {"safe-stack" , "NumUnsafeStaticAllocas", "Number of unsafe static allocas" , {0}, {false}}; |
86 | STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas")static llvm::Statistic NumUnsafeDynamicAllocas = {"safe-stack" , "NumUnsafeDynamicAllocas", "Number of unsafe dynamic allocas" , {0}, {false}}; |
87 | STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments")static llvm::Statistic NumUnsafeByValArguments = {"safe-stack" , "NumUnsafeByValArguments", "Number of unsafe byval arguments" , {0}, {false}}; |
88 | STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads")static llvm::Statistic NumUnsafeStackRestorePoints = {"safe-stack" , "NumUnsafeStackRestorePoints", "Number of setjmps and landingpads" , {0}, {false}}; |
89 | |
90 | } // namespace llvm |
91 | |
92 | /// Use __safestack_pointer_address even if the platform has a faster way of |
93 | /// access safe stack pointer. |
94 | static cl::opt<bool> |
95 | SafeStackUsePointerAddress("safestack-use-pointer-address", |
96 | cl::init(false), cl::Hidden); |
97 | |
98 | |
99 | namespace { |
100 | |
101 | /// Rewrite an SCEV expression for a memory access address to an expression that |
102 | /// represents offset from the given alloca. |
103 | /// |
104 | /// The implementation simply replaces all mentions of the alloca with zero. |
105 | class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> { |
106 | const Value *AllocaPtr; |
107 | |
108 | public: |
109 | AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr) |
110 | : SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {} |
111 | |
112 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
113 | if (Expr->getValue() == AllocaPtr) |
114 | return SE.getZero(Expr->getType()); |
115 | return Expr; |
116 | } |
117 | }; |
118 | |
119 | /// The SafeStack pass splits the stack of each function into the safe |
120 | /// stack, which is only accessed through memory safe dereferences (as |
121 | /// determined statically), and the unsafe stack, which contains all |
122 | /// local variables that are accessed in ways that we can't prove to |
123 | /// be safe. |
124 | class SafeStack { |
125 | Function &F; |
126 | const TargetLoweringBase &TL; |
127 | const DataLayout &DL; |
128 | ScalarEvolution &SE; |
129 | |
130 | Type *StackPtrTy; |
131 | Type *IntPtrTy; |
132 | Type *Int32Ty; |
133 | Type *Int8Ty; |
134 | |
135 | Value *UnsafeStackPtr = nullptr; |
136 | |
137 | /// Unsafe stack alignment. Each stack frame must ensure that the stack is |
138 | /// aligned to this value. We need to re-align the unsafe stack if the |
139 | /// alignment of any object on the stack exceeds this value. |
140 | /// |
141 | /// 16 seems like a reasonable upper bound on the alignment of objects that we |
142 | /// might expect to appear on the stack on most common targets. |
143 | enum { StackAlignment = 16 }; |
144 | |
145 | /// Return the value of the stack canary. |
146 | Value *getStackGuard(IRBuilder<> &IRB, Function &F); |
147 | |
148 | /// Load stack guard from the frame and check if it has changed. |
149 | void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI, |
150 | AllocaInst *StackGuardSlot, Value *StackGuard); |
151 | |
152 | /// Find all static allocas, dynamic allocas, return instructions and |
153 | /// stack restore points (exception unwind blocks and setjmp calls) in the |
154 | /// given function and append them to the respective vectors. |
155 | void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas, |
156 | SmallVectorImpl<AllocaInst *> &DynamicAllocas, |
157 | SmallVectorImpl<Argument *> &ByValArguments, |
158 | SmallVectorImpl<ReturnInst *> &Returns, |
159 | SmallVectorImpl<Instruction *> &StackRestorePoints); |
160 | |
161 | /// Calculate the allocation size of a given alloca. Returns 0 if the |
162 | /// size can not be statically determined. |
163 | uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI); |
164 | |
165 | /// Allocate space for all static allocas in \p StaticAllocas, |
166 | /// replace allocas with pointers into the unsafe stack and generate code to |
167 | /// restore the stack pointer before all return instructions in \p Returns. |
168 | /// |
169 | /// \returns A pointer to the top of the unsafe stack after all unsafe static |
170 | /// allocas are allocated. |
171 | Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F, |
172 | ArrayRef<AllocaInst *> StaticAllocas, |
173 | ArrayRef<Argument *> ByValArguments, |
174 | ArrayRef<ReturnInst *> Returns, |
175 | Instruction *BasePointer, |
176 | AllocaInst *StackGuardSlot); |
177 | |
178 | /// Generate code to restore the stack after all stack restore points |
179 | /// in \p StackRestorePoints. |
180 | /// |
181 | /// \returns A local variable in which to maintain the dynamic top of the |
182 | /// unsafe stack if needed. |
183 | AllocaInst * |
184 | createStackRestorePoints(IRBuilder<> &IRB, Function &F, |
185 | ArrayRef<Instruction *> StackRestorePoints, |
186 | Value *StaticTop, bool NeedDynamicTop); |
187 | |
188 | /// Replace all allocas in \p DynamicAllocas with code to allocate |
189 | /// space dynamically on the unsafe stack and store the dynamic unsafe stack |
190 | /// top to \p DynamicTop if non-null. |
191 | void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr, |
192 | AllocaInst *DynamicTop, |
193 | ArrayRef<AllocaInst *> DynamicAllocas); |
194 | |
195 | bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize); |
196 | |
197 | bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U, |
198 | const Value *AllocaPtr, uint64_t AllocaSize); |
199 | bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr, |
200 | uint64_t AllocaSize); |
201 | |
202 | bool ShouldInlinePointerAddress(CallSite &CS); |
203 | void TryInlinePointerAddress(); |
204 | |
205 | public: |
206 | SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL, |
207 | ScalarEvolution &SE) |
208 | : F(F), TL(TL), DL(DL), SE(SE), |
209 | StackPtrTy(Type::getInt8PtrTy(F.getContext())), |
210 | IntPtrTy(DL.getIntPtrType(F.getContext())), |
211 | Int32Ty(Type::getInt32Ty(F.getContext())), |
212 | Int8Ty(Type::getInt8Ty(F.getContext())) {} |
213 | |
214 | // Run the transformation on the associated function. |
215 | // Returns whether the function was changed. |
216 | bool run(); |
217 | }; |
218 | |
219 | uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) { |
220 | uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType()); |
221 | if (AI->isArrayAllocation()) { |
222 | auto C = dyn_cast<ConstantInt>(AI->getArraySize()); |
223 | if (!C) |
224 | return 0; |
225 | Size *= C->getZExtValue(); |
226 | } |
227 | return Size; |
228 | } |
229 | |
230 | bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize, |
231 | const Value *AllocaPtr, uint64_t AllocaSize) { |
232 | AllocaOffsetRewriter Rewriter(SE, AllocaPtr); |
233 | const SCEV *Expr = Rewriter.visit(SE.getSCEV(Addr)); |
234 | |
235 | uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType()); |
236 | ConstantRange AccessStartRange = SE.getUnsignedRange(Expr); |
237 | ConstantRange SizeRange = |
238 | ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize)); |
239 | ConstantRange AccessRange = AccessStartRange.add(SizeRange); |
240 | ConstantRange AllocaRange = |
241 | ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize)); |
242 | bool Safe = AllocaRange.contains(AccessRange); |
243 | |
244 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
245 | dbgs() << "[SafeStack] "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
246 | << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
247 | << *AllocaPtr << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
248 | << " Access " << *Addr << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
249 | << " SCEV " << *Exprdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
250 | << " U: " << SE.getUnsignedRange(Expr)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
251 | << ", S: " << SE.getSignedRange(Expr) << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
252 | << " Range " << AccessRange << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
253 | << " AllocaRange " << AllocaRange << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false) |
254 | << " " << (Safe ? "safe" : "unsafe") << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] " << (isa <AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") << *AllocaPtr << "\n" << " Access " << *Addr << "\n" << " SCEV " << *Expr << " U: " << SE.getUnsignedRange(Expr) << ", S: " << SE.getSignedRange(Expr) << "\n" << " Range " << AccessRange << "\n" << " AllocaRange " << AllocaRange << "\n" << " " << (Safe ? "safe" : "unsafe") << "\n"; } } while (false); |
255 | |
256 | return Safe; |
257 | } |
258 | |
259 | bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U, |
260 | const Value *AllocaPtr, |
261 | uint64_t AllocaSize) { |
262 | if (auto MTI = dyn_cast<MemTransferInst>(MI)) { |
263 | if (MTI->getRawSource() != U && MTI->getRawDest() != U) |
264 | return true; |
265 | } else { |
266 | if (MI->getRawDest() != U) |
267 | return true; |
268 | } |
269 | |
270 | const auto *Len = dyn_cast<ConstantInt>(MI->getLength()); |
271 | // Non-constant size => unsafe. FIXME: try SCEV getRange. |
272 | if (!Len) return false; |
273 | return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize); |
274 | } |
275 | |
276 | /// Check whether a given allocation must be put on the safe |
277 | /// stack or not. The function analyzes all uses of AI and checks whether it is |
278 | /// only accessed in a memory safe way (as decided statically). |
279 | bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) { |
280 | // Go through all uses of this alloca and check whether all accesses to the |
281 | // allocated object are statically known to be memory safe and, hence, the |
282 | // object can be placed on the safe stack. |
283 | SmallPtrSet<const Value *, 16> Visited; |
284 | SmallVector<const Value *, 8> WorkList; |
285 | WorkList.push_back(AllocaPtr); |
286 | |
287 | // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc. |
288 | while (!WorkList.empty()) { |
289 | const Value *V = WorkList.pop_back_val(); |
290 | for (const Use &UI : V->uses()) { |
291 | auto I = cast<const Instruction>(UI.getUser()); |
292 | assert(V == UI.get())((V == UI.get()) ? static_cast<void> (0) : __assert_fail ("V == UI.get()", "/build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/SafeStack.cpp" , 292, __PRETTY_FUNCTION__)); |
293 | |
294 | switch (I->getOpcode()) { |
295 | case Instruction::Load: |
296 | if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr, |
297 | AllocaSize)) |
298 | return false; |
299 | break; |
300 | |
301 | case Instruction::VAArg: |
302 | // "va-arg" from a pointer is safe. |
303 | break; |
304 | case Instruction::Store: |
305 | if (V == I->getOperand(0)) { |
306 | // Stored the pointer - conservatively assume it may be unsafe. |
307 | LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n store of address: " << *I << "\n"; } } while (false) |
308 | << "[SafeStack] Unsafe alloca: " << *AllocaPtrdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n store of address: " << *I << "\n"; } } while (false) |
309 | << "\n store of address: " << *I << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n store of address: " << *I << "\n"; } } while (false); |
310 | return false; |
311 | } |
312 | |
313 | if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()), |
314 | AllocaPtr, AllocaSize)) |
315 | return false; |
316 | break; |
317 | |
318 | case Instruction::Ret: |
319 | // Information leak. |
320 | return false; |
321 | |
322 | case Instruction::Call: |
323 | case Instruction::Invoke: { |
324 | ImmutableCallSite CS(I); |
325 | |
326 | if (I->isLifetimeStartOrEnd()) |
327 | continue; |
328 | |
329 | if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) { |
330 | if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) { |
331 | LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe memintrinsic: " << *I << "\n"; } } while (false) |
332 | << "[SafeStack] Unsafe alloca: " << *AllocaPtrdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe memintrinsic: " << *I << "\n"; } } while (false) |
333 | << "\n unsafe memintrinsic: " << *I << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe memintrinsic: " << *I << "\n"; } } while (false); |
334 | return false; |
335 | } |
336 | continue; |
337 | } |
338 | |
339 | // LLVM 'nocapture' attribute is only set for arguments whose address |
340 | // is not stored, passed around, or used in any other non-trivial way. |
341 | // We assume that passing a pointer to an object as a 'nocapture |
342 | // readnone' argument is safe. |
343 | // FIXME: a more precise solution would require an interprocedural |
344 | // analysis here, which would look at all uses of an argument inside |
345 | // the function being called. |
346 | ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end(); |
347 | for (ImmutableCallSite::arg_iterator A = B; A != E; ++A) |
348 | if (A->get() == V) |
349 | if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) || |
350 | CS.doesNotAccessMemory()))) { |
351 | LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtrdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe call: " << *I << "\n"; } } while (false) |
352 | << "\n unsafe call: " << *I << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("safe-stack")) { dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe call: " << *I << "\n"; } } while (false); |
353 | return false; |
354 | } |
355 | continue; |
356 | } |
357 | |
358 | default: |
359 | if (Visited.insert(I).second) |
360 | WorkList.push_back(cast<const Instruction>(I)); |
361 | } |
362 | } |
363 | } |
364 | |
365 | // All uses of the alloca are safe, we can place it on the safe stack. |
366 | return true; |
367 | } |
368 | |
369 | Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) { |
370 | Value *StackGuardVar = TL.getIRStackGuard(IRB); |
371 | if (!StackGuardVar) |
372 | StackGuardVar = |
373 | F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy); |
374 | return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard"); |
375 | } |
376 | |
377 | void SafeStack::findInsts(Function &F, |
378 | SmallVectorImpl<AllocaInst *> &StaticAllocas, |
379 | SmallVectorImpl<AllocaInst *> &DynamicAllocas, |
380 | SmallVectorImpl<Argument *> &ByValArguments, |
381 | SmallVectorImpl<ReturnInst *> &Returns, |
382 | SmallVectorImpl<Instruction *> &StackRestorePoints) { |
383 | for (Instruction &I : instructions(&F)) { |
384 | if (auto AI = dyn_cast<AllocaInst>(&I)) { |
385 | ++NumAllocas; |
386 | |
387 | uint64_t Size = getStaticAllocaAllocationSize(AI); |
388 | if (IsSafeStackAlloca(AI, Size)) |
389 | continue; |
390 | |
391 | if (AI->isStaticAlloca()) { |
392 | ++NumUnsafeStaticAllocas; |
393 | StaticAllocas.push_back(AI); |
394 | } else { |
395 | ++NumUnsafeDynamicAllocas; |
396 | DynamicAllocas.push_back(AI); |
397 | } |
398 | } else if (auto RI = dyn_cast<ReturnInst>(&I)) { |
399 | Returns.push_back(RI); |
400 | } else if (auto CI = dyn_cast<CallInst>(&I)) { |
401 | // setjmps require stack restore. |
402 | if (CI->getCalledFunction() && CI->canReturnTwice()) |
403 | StackRestorePoints.push_back(CI); |
404 | } else if (auto LP = dyn_cast<LandingPadInst>(&I)) { |
405 | // Exception landing pads require stack restore. |
406 | StackRestorePoints.push_back(LP); |
407 | } else if (auto II = dyn_cast<IntrinsicInst>(&I)) { |
408 | if (II->getIntrinsicID() == Intrinsic::gcroot) |
409 | report_fatal_error( |
410 | "gcroot intrinsic not compatible with safestack attribute"); |
411 | } |
412 | } |
413 | for (Argument &Arg : F.args()) { |
414 | if (!Arg.hasByValAttr()) |
415 | continue; |
416 | uint64_t Size = |
417 | DL.getTypeStoreSize(Arg.getType()->getPointerElementType()); |
418 | if (IsSafeStackAlloca(&Arg, Size)) |
419 | continue; |
420 | |
421 | ++NumUnsafeByValArguments; |
422 | ByValArguments.push_back(&Arg); |
423 | } |
424 | } |
425 | |
426 | AllocaInst * |
427 | SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F, |
428 | ArrayRef<Instruction *> StackRestorePoints, |
429 | Value *StaticTop, bool NeedDynamicTop) { |
430 | assert(StaticTop && "The stack top isn't set.")((StaticTop && "The stack top isn't set.") ? static_cast <void> (0) : __assert_fail ("StaticTop && \"The stack top isn't set.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/SafeStack.cpp" , 430, __PRETTY_FUNCTION__)); |
431 | |
432 | if (StackRestorePoints.empty()) |
433 | return nullptr; |
434 | |
435 | // We need the current value of the shadow stack pointer to restore |
436 | // after longjmp or exception catching. |
437 | |
438 | // FIXME: On some platforms this could be handled by the longjmp/exception |
439 | // runtime itself. |
440 | |
441 | AllocaInst *DynamicTop = nullptr; |
442 | if (NeedDynamicTop) { |
443 | // If we also have dynamic alloca's, the stack pointer value changes |
444 | // throughout the function. For now we store it in an alloca. |
445 | DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr, |
446 | "unsafe_stack_dynamic_ptr"); |
447 | IRB.CreateStore(StaticTop, DynamicTop); |
448 | } |
449 | |
450 | // Restore current stack pointer after longjmp/exception catch. |
451 | for (Instruction *I : StackRestorePoints) { |
452 | ++NumUnsafeStackRestorePoints; |
453 | |
454 | IRB.SetInsertPoint(I->getNextNode()); |
455 | Value *CurrentTop = |
456 | DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop; |
457 | IRB.CreateStore(CurrentTop, UnsafeStackPtr); |
458 | } |
459 | |
460 | return DynamicTop; |
461 | } |
462 | |
463 | void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI, |
464 | AllocaInst *StackGuardSlot, Value *StackGuard) { |
465 | Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot); |
466 | Value *Cmp = IRB.CreateICmpNE(StackGuard, V); |
467 | |
468 | auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true); |
469 | auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false); |
470 | MDNode *Weights = MDBuilder(F.getContext()) |
471 | .createBranchWeights(SuccessProb.getNumerator(), |
472 | FailureProb.getNumerator()); |
473 | Instruction *CheckTerm = |
474 | SplitBlockAndInsertIfThen(Cmp, &RI, |
475 | /* Unreachable */ true, Weights); |
476 | IRBuilder<> IRBFail(CheckTerm); |
477 | // FIXME: respect -fsanitize-trap / -ftrap-function here? |
478 | FunctionCallee StackChkFail = |
479 | F.getParent()->getOrInsertFunction("__stack_chk_fail", IRB.getVoidTy()); |
480 | IRBFail.CreateCall(StackChkFail, {}); |
481 | } |
482 | |
483 | /// We explicitly compute and set the unsafe stack layout for all unsafe |
484 | /// static alloca instructions. We save the unsafe "base pointer" in the |
485 | /// prologue into a local variable and restore it in the epilogue. |
486 | Value *SafeStack::moveStaticAllocasToUnsafeStack( |
487 | IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas, |
488 | ArrayRef<Argument *> ByValArguments, ArrayRef<ReturnInst *> Returns, |
489 | Instruction *BasePointer, AllocaInst *StackGuardSlot) { |
490 | if (StaticAllocas.empty() && ByValArguments.empty()) |
491 | return BasePointer; |
492 | |
493 | DIBuilder DIB(*F.getParent()); |
494 | |
495 | StackColoring SSC(F, StaticAllocas); |
496 | SSC.run(); |
497 | SSC.removeAllMarkers(); |
498 | |
499 | // Unsafe stack always grows down. |
500 | StackLayout SSL(StackAlignment); |
501 | if (StackGuardSlot) { |
502 | Type *Ty = StackGuardSlot->getAllocatedType(); |
503 | unsigned Align = |
504 | std::max(DL.getPrefTypeAlignment(Ty), StackGuardSlot->getAlignment()); |
505 | SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot), |
506 | Align, SSC.getFullLiveRange()); |
507 | } |
508 | |
509 | for (Argument *Arg : ByValArguments) { |
510 | Type *Ty = Arg->getType()->getPointerElementType(); |
511 | uint64_t Size = DL.getTypeStoreSize(Ty); |
512 | if (Size == 0) |
513 | Size = 1; // Don't create zero-sized stack objects. |
514 | |
515 | // Ensure the object is properly aligned. |
516 | unsigned Align = std::max((unsigned)DL.getPrefTypeAlignment(Ty), |
517 | Arg->getParamAlignment()); |
518 | SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange()); |
519 | } |
520 | |
521 | for (AllocaInst *AI : StaticAllocas) { |
522 | Type *Ty = AI->getAllocatedType(); |
523 | uint64_t Size = getStaticAllocaAllocationSize(AI); |
524 | if (Size == 0) |
525 | Size = 1; // Don't create zero-sized stack objects. |
526 | |
527 | // Ensure the object is properly aligned. |
528 | unsigned Align = |
529 | std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment()); |
530 | |
531 | SSL.addObject(AI, Size, Align, SSC.getLiveRange(AI)); |
532 | } |
533 | |
534 | SSL.computeLayout(); |
535 | unsigned FrameAlignment = SSL.getFrameAlignment(); |
536 | |
537 | // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location |
538 | // (AlignmentSkew). |
539 | if (FrameAlignment > StackAlignment) { |
540 | // Re-align the base pointer according to the max requested alignment. |
541 | assert(isPowerOf2_32(FrameAlignment))((isPowerOf2_32(FrameAlignment)) ? static_cast<void> (0 ) : __assert_fail ("isPowerOf2_32(FrameAlignment)", "/build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/SafeStack.cpp" , 541, __PRETTY_FUNCTION__)); |
542 | IRB.SetInsertPoint(BasePointer->getNextNode()); |
543 | BasePointer = cast<Instruction>(IRB.CreateIntToPtr( |
544 | IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy), |
545 | ConstantInt::get(IntPtrTy, ~uint64_t(FrameAlignment - 1))), |
546 | StackPtrTy)); |
547 | } |
548 | |
549 | IRB.SetInsertPoint(BasePointer->getNextNode()); |
550 | |
551 | if (StackGuardSlot) { |
552 | unsigned Offset = SSL.getObjectOffset(StackGuardSlot); |
553 | Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8* |
554 | ConstantInt::get(Int32Ty, -Offset)); |
555 | Value *NewAI = |
556 | IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot"); |
557 | |
558 | // Replace alloc with the new location. |
559 | StackGuardSlot->replaceAllUsesWith(NewAI); |
560 | StackGuardSlot->eraseFromParent(); |
561 | } |
562 | |
563 | for (Argument *Arg : ByValArguments) { |
564 | unsigned Offset = SSL.getObjectOffset(Arg); |
565 | unsigned Align = SSL.getObjectAlignment(Arg); |
566 | Type *Ty = Arg->getType()->getPointerElementType(); |
567 | |
568 | uint64_t Size = DL.getTypeStoreSize(Ty); |
569 | if (Size == 0) |
570 | Size = 1; // Don't create zero-sized stack objects. |
571 | |
572 | Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8* |
573 | ConstantInt::get(Int32Ty, -Offset)); |
574 | Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(), |
575 | Arg->getName() + ".unsafe-byval"); |
576 | |
577 | // Replace alloc with the new location. |
578 | replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB, |
579 | DIExpression::ApplyOffset, -Offset); |
580 | Arg->replaceAllUsesWith(NewArg); |
581 | IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode()); |
582 | IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlignment(), Size); |
583 | } |
584 | |
585 | // Allocate space for every unsafe static AllocaInst on the unsafe stack. |
586 | for (AllocaInst *AI : StaticAllocas) { |
587 | IRB.SetInsertPoint(AI); |
588 | unsigned Offset = SSL.getObjectOffset(AI); |
589 | |
590 | uint64_t Size = getStaticAllocaAllocationSize(AI); |
591 | if (Size == 0) |
592 | Size = 1; // Don't create zero-sized stack objects. |
Value stored to 'Size' is never read | |
593 | |
594 | replaceDbgDeclareForAlloca(AI, BasePointer, DIB, DIExpression::ApplyOffset, |
595 | -Offset); |
596 | replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset); |
597 | |
598 | // Replace uses of the alloca with the new location. |
599 | // Insert address calculation close to each use to work around PR27844. |
600 | std::string Name = std::string(AI->getName()) + ".unsafe"; |
601 | while (!AI->use_empty()) { |
602 | Use &U = *AI->use_begin(); |
603 | Instruction *User = cast<Instruction>(U.getUser()); |
604 | |
605 | Instruction *InsertBefore; |
606 | if (auto *PHI = dyn_cast<PHINode>(User)) |
607 | InsertBefore = PHI->getIncomingBlock(U)->getTerminator(); |
608 | else |
609 | InsertBefore = User; |
610 | |
611 | IRBuilder<> IRBUser(InsertBefore); |
612 | Value *Off = IRBUser.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8* |
613 | ConstantInt::get(Int32Ty, -Offset)); |
614 | Value *Replacement = IRBUser.CreateBitCast(Off, AI->getType(), Name); |
615 | |
616 | if (auto *PHI = dyn_cast<PHINode>(User)) { |
617 | // PHI nodes may have multiple incoming edges from the same BB (why??), |
618 | // all must be updated at once with the same incoming value. |
619 | auto *BB = PHI->getIncomingBlock(U); |
620 | for (unsigned I = 0; I < PHI->getNumIncomingValues(); ++I) |
621 | if (PHI->getIncomingBlock(I) == BB) |
622 | PHI->setIncomingValue(I, Replacement); |
623 | } else { |
624 | U.set(Replacement); |
625 | } |
626 | } |
627 | |
628 | AI->eraseFromParent(); |
629 | } |
630 | |
631 | // Re-align BasePointer so that our callees would see it aligned as |
632 | // expected. |
633 | // FIXME: no need to update BasePointer in leaf functions. |
634 | unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment); |
635 | |
636 | // Update shadow stack pointer in the function epilogue. |
637 | IRB.SetInsertPoint(BasePointer->getNextNode()); |
638 | |
639 | Value *StaticTop = |
640 | IRB.CreateGEP(Int8Ty, BasePointer, ConstantInt::get(Int32Ty, -FrameSize), |
641 | "unsafe_stack_static_top"); |
642 | IRB.CreateStore(StaticTop, UnsafeStackPtr); |
643 | return StaticTop; |
644 | } |
645 | |
/// Lower every dynamic (non-constant-sized) alloca in \p DynamicAllocas onto
/// the unsafe stack: each becomes an explicit decrement-and-align of the
/// unsafe stack pointer, and (if any exist) stacksave/stackrestore intrinsics
/// are rewritten to load/store \p UnsafeStackPtr instead.
void SafeStack::moveDynamicAllocasToUnsafeStack(
    Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
    ArrayRef<AllocaInst *> DynamicAllocas) {
  DIBuilder DIB(*F.getParent());

  for (AllocaInst *AI : DynamicAllocas) {
    IRBuilder<> IRB(AI);

    // Compute the new SP value (after AI).
    Value *ArraySize = AI->getArraySize();
    if (ArraySize->getType() != IntPtrTy)
      ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);

    // Total byte size = element count * per-element allocation size.
    Type *Ty = AI->getAllocatedType();
    uint64_t TySize = DL.getTypeAllocSize(Ty);
    Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));

    // The unsafe stack grows downward: load the current top and subtract.
    Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
                                   IntPtrTy);
    SP = IRB.CreateSub(SP, Size);

    // Align the SP value to satisfy the AllocaInst, type and stack alignments.
    unsigned Align = std::max(
        std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment()),
        (unsigned)StackAlignment);

    // Round down by masking off the low bits (valid since Align is a power
    // of 2 and the stack grows down).
    assert(isPowerOf2_32(Align));
    Value *NewTop = IRB.CreateIntToPtr(
        IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
        StackPtrTy);

    // Save the stack pointer.
    IRB.CreateStore(NewTop, UnsafeStackPtr);
    if (DynamicTop)
      IRB.CreateStore(NewTop, DynamicTop);

    // Redirect all uses of the alloca to the new unsafe-stack address.
    Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
    if (AI->hasName() && isa<Instruction>(NewAI))
      NewAI->takeName(AI);

    replaceDbgDeclareForAlloca(AI, NewAI, DIB, DIExpression::ApplyOffset, 0);
    AI->replaceAllUsesWith(NewAI);
    AI->eraseFromParent();
  }

  if (!DynamicAllocas.empty()) {
    // Now go through the instructions again, replacing stacksave/stackrestore.
    for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
      // Advance the iterator BEFORE possibly erasing I, so erasure cannot
      // invalidate it.
      Instruction *I = &*(It++);
      auto II = dyn_cast<IntrinsicInst>(I);
      if (!II)
        continue;

      if (II->getIntrinsicID() == Intrinsic::stacksave) {
        // stacksave -> load of the unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
        LI->takeName(II);
        II->replaceAllUsesWith(LI);
        II->eraseFromParent();
      } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
        // stackrestore -> store back into the unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
        SI->takeName(II);
        assert(II->use_empty());
        II->eraseFromParent();
      }
    }
  }
}
715 | |
716 | bool SafeStack::ShouldInlinePointerAddress(CallSite &CS) { |
717 | Function *Callee = CS.getCalledFunction(); |
718 | if (CS.hasFnAttr(Attribute::AlwaysInline) && isInlineViable(*Callee)) |
719 | return true; |
720 | if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) || |
721 | CS.isNoInline()) |
722 | return false; |
723 | return true; |
724 | } |
725 | |
726 | void SafeStack::TryInlinePointerAddress() { |
727 | if (!isa<CallInst>(UnsafeStackPtr)) |
728 | return; |
729 | |
730 | if(F.hasOptNone()) |
731 | return; |
732 | |
733 | CallSite CS(UnsafeStackPtr); |
734 | Function *Callee = CS.getCalledFunction(); |
735 | if (!Callee || Callee->isDeclaration()) |
736 | return; |
737 | |
738 | if (!ShouldInlinePointerAddress(CS)) |
739 | return; |
740 | |
741 | InlineFunctionInfo IFI; |
742 | InlineFunction(CS, IFI); |
743 | } |
744 | |
/// Main entry point: split this function's stack.
/// Returns true if the function was modified.
bool SafeStack::run() {
  assert(F.hasFnAttribute(Attribute::SafeStack) &&
         "Can't run SafeStack on a function without the attribute");
  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");

  ++NumFunctions;

  SmallVector<AllocaInst *, 16> StaticAllocas;
  SmallVector<AllocaInst *, 4> DynamicAllocas;
  SmallVector<Argument *, 4> ByValArguments;
  SmallVector<ReturnInst *, 4> Returns;

  // Collect all points where stack gets unwound and needs to be restored
  // This is only necessary because the runtime (setjmp and unwind code) is
  // not aware of the unsafe stack and won't unwind/restore it properly.
  // To work around this problem without changing the runtime, we insert
  // instrumentation to restore the unsafe stack pointer when necessary.
  SmallVector<Instruction *, 4> StackRestorePoints;

  // Find all static and dynamic alloca instructions that must be moved to the
  // unsafe stack, all return instructions and stack restore points.
  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
            StackRestorePoints);

  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
      ByValArguments.empty() && StackRestorePoints.empty())
    return false; // Nothing to do in this function.

  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
      !ByValArguments.empty())
    ++NumUnsafeStackFunctions; // This function has the unsafe stack.

  if (!StackRestorePoints.empty())
    ++NumUnsafeStackRestorePointsFunctions;

  // Insert prologue code at the first insertion point of the entry block.
  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
  // Calls must always have a debug location, or else inlining breaks. So
  // we explicitly set a artificial debug location here.
  if (DISubprogram *SP = F.getSubprogram())
    IRB.SetCurrentDebugLocation(DebugLoc::get(SP->getScopeLine(), 0, SP));
  if (SafeStackUsePointerAddress) {
    // Obtain the unsafe stack pointer through a runtime call (this call may
    // later be inlined by TryInlinePointerAddress()).
    FunctionCallee Fn = F.getParent()->getOrInsertFunction(
        "__safestack_pointer_address", StackPtrTy->getPointerTo(0));
    UnsafeStackPtr = IRB.CreateCall(Fn);
  } else {
    UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);
  }

  // Load the current stack pointer (we'll also use it as a base pointer).
  // FIXME: use a dedicated register for it ?
  Instruction *BasePointer =
      IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
  assert(BasePointer->getType() == StackPtrTy);

  AllocaInst *StackGuardSlot = nullptr;
  // FIXME: implement weaker forms of stack protector.
  if (F.hasFnAttribute(Attribute::StackProtect) ||
      F.hasFnAttribute(Attribute::StackProtectStrong) ||
      F.hasFnAttribute(Attribute::StackProtectReq)) {
    // Spill the stack guard into a slot; moveStaticAllocasToUnsafeStack()
    // relocates this slot onto the unsafe stack.
    Value *StackGuard = getStackGuard(IRB, F);
    StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
    IRB.CreateStore(StackGuard, StackGuardSlot);

    // Verify the guard before every return.
    for (ReturnInst *RI : Returns) {
      IRBuilder<> IRBRet(RI);
      checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
    }
  }

  // The top of the unsafe stack after all unsafe static allocas are
  // allocated.
  Value *StaticTop =
      moveStaticAllocasToUnsafeStack(IRB, F, StaticAllocas, ByValArguments,
                                     Returns, BasePointer, StackGuardSlot);

  // Safe stack object that stores the current unsafe stack top. It is updated
  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
  // This is only needed if we need to restore stack pointer after longjmp
  // or exceptions, and we have dynamic allocations.
  // FIXME: a better alternative might be to store the unsafe stack pointer
  // before setjmp / invoke instructions.
  AllocaInst *DynamicTop = createStackRestorePoints(
      IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());

  // Handle dynamic allocas.
  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
                                  DynamicAllocas);

  // Restore the unsafe stack pointer before each return.
  for (ReturnInst *RI : Returns) {
    IRB.SetInsertPoint(RI);
    IRB.CreateStore(BasePointer, UnsafeStackPtr);
  }

  TryInlinePointerAddress();

  LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
  return true;
}
844 | |
/// Legacy pass-manager wrapper that runs the SafeStack transformation on
/// every function carrying the `safestack` attribute.
class SafeStackLegacyPass : public FunctionPass {
  const TargetMachine *TM = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid.

  SafeStackLegacyPass() : FunctionPass(ID) {
    initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
  }

  bool runOnFunction(Function &F) override {
    LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");

    // Skip functions that did not request safestack instrumentation.
    if (!F.hasFnAttribute(Attribute::SafeStack)) {
      LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
                           " for this function\n");
      return false;
    }

    // Declarations have no body to instrument.
    if (F.isDeclaration()) {
      LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
                           " is not available\n");
      return false;
    }

    TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
    if (!TL)
      report_fatal_error("TargetLowering instance is required");

    auto *DL = &F.getParent()->getDataLayout();
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    // Compute DT and LI only for functions that have the attribute.
    // This is only useful because the legacy pass manager doesn't let us
    // compute analyzes lazily.
    // In the backend pipeline, nothing preserves DT before SafeStack, so we
    // would otherwise always compute it wastefully, even if there is no
    // function with the safestack attribute.
    DominatorTree DT(F);
    LoopInfo LI(DT);

    ScalarEvolution SE(F, TLI, ACT, DT, LI);

    return SafeStack(F, *TL, *DL, SE).run();
  }
};
899 | |
900 | } // end anonymous namespace |
901 | |
char SafeStackLegacyPass::ID = 0;

// Register the pass with the legacy pass manager; TargetPassConfig is a
// declared dependency because runOnFunction() queries it for the
// TargetMachine.
INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
                      "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)
909 | |
910 | FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); } |