LLVM 22.0.0git
SafeStack.cpp
1//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10// and the unsafe stack (explicitly allocated and managed through the runtime
11// support library).
12//
13// http://clang.llvm.org/docs/SafeStack.html
14//
15//===----------------------------------------------------------------------===//
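// A minimal source-level illustration of the split (editorial sketch of
// hypothetical user code, not part of this file): under -fsanitize=safe-stack,
// `x` stays on the normal (safe) stack because every access is provably in
// bounds, while `buf` is moved to the unsafe stack because its address escapes
// to a callee the analysis cannot see through:
//
//   void callee(char *p);   // external; may capture or overflow the buffer
//   int f(void) {
//     int x = 0;            // provably safe accesses -> safe stack
//     char buf[16];         // address escapes        -> unsafe stack
//     callee(buf);
//     return x;
//   }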
16
17#include "llvm/CodeGen/SafeStack.h"
18#include "SafeStackLayout.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/Statistic.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DIBuilder.h"
41#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/MDBuilder.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Use.h"
56#include "llvm/IR/Value.h"
58#include "llvm/Pass.h"
60#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <optional>
71#include <string>
72#include <utility>
73
74using namespace llvm;
75using namespace llvm::safestack;
76
77#define DEBUG_TYPE "safe-stack"
78
79STATISTIC(NumFunctions, "Total number of functions");
80STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
81STATISTIC(NumUnsafeStackRestorePointsFunctions,
82 "Number of functions that use setjmp or exceptions");
83
84STATISTIC(NumAllocas, "Total number of allocas");
85STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
86STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
87STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
88STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
89
90/// Use __safestack_pointer_address even if the platform has a faster way of
91/// accessing the safe stack pointer.
92static cl::opt<bool>
93 SafeStackUsePointerAddress("safestack-use-pointer-address",
94 cl::init(false), cl::Hidden);
95
96static cl::opt<bool> ClColoring("safe-stack-coloring",
97 cl::desc("enable safe stack coloring"),
98 cl::Hidden, cl::init(true));
99
100namespace {
101
102/// The SafeStack pass splits the stack of each function into the safe
103/// stack, which is only accessed through memory safe dereferences (as
104/// determined statically), and the unsafe stack, which contains all
105/// local variables that are accessed in ways that we can't prove to
106/// be safe.
107class SafeStack {
108 Function &F;
109 const TargetLoweringBase &TL;
110 const DataLayout &DL;
111 DomTreeUpdater *DTU;
112 ScalarEvolution &SE;
113
114 Type *StackPtrTy;
115 Type *IntPtrTy;
116 Type *Int32Ty;
117
118 Value *UnsafeStackPtr = nullptr;
119
120 /// Unsafe stack alignment. Each stack frame must ensure that the stack is
121 /// aligned to this value. We need to re-align the unsafe stack if the
122 /// alignment of any object on the stack exceeds this value.
123 ///
124 /// 16 seems like a reasonable upper bound on the alignment of objects that we
125 /// might expect to appear on the stack on most common targets.
126 static constexpr Align StackAlignment = Align::Constant<16>();
127
128 /// Return the value of the stack canary.
129 Value *getStackGuard(IRBuilder<> &IRB, Function &F);
130
131 /// Load stack guard from the frame and check if it has changed.
132 void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
133 AllocaInst *StackGuardSlot, Value *StackGuard);
134
135 /// Find all static allocas, dynamic allocas, return instructions and
136 /// stack restore points (exception unwind blocks and setjmp calls) in the
137 /// given function and append them to the respective vectors.
138 void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
139 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
140 SmallVectorImpl<Argument *> &ByValArguments,
141 SmallVectorImpl<Instruction *> &Returns,
142 SmallVectorImpl<Instruction *> &StackRestorePoints);
143
144 /// Calculate the allocation size of a given alloca. Returns 0 if the
145 /// size can not be statically determined.
146 uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
147
148 /// Allocate space for all static allocas in \p StaticAllocas,
149 /// replace allocas with pointers into the unsafe stack.
150 ///
151 /// \returns A pointer to the top of the unsafe stack after all unsafe static
152 /// allocas are allocated.
153 Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
154 ArrayRef<AllocaInst *> StaticAllocas,
155 ArrayRef<Argument *> ByValArguments,
156 Instruction *BasePointer,
157 AllocaInst *StackGuardSlot);
158
159 /// Generate code to restore the stack after all stack restore points
160 /// in \p StackRestorePoints.
161 ///
162 /// \returns A local variable in which to maintain the dynamic top of the
163 /// unsafe stack if needed.
164 AllocaInst *
165 createStackRestorePoints(IRBuilder<> &IRB, Function &F,
166 ArrayRef<Instruction *> StackRestorePoints,
167 Value *StaticTop, bool NeedDynamicTop);
168
169 /// Replace all allocas in \p DynamicAllocas with code to allocate
170 /// space dynamically on the unsafe stack and store the dynamic unsafe stack
171 /// top to \p DynamicTop if non-null.
172 void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
173 AllocaInst *DynamicTop,
174 ArrayRef<AllocaInst *> DynamicAllocas);
175
176 bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);
177
178 bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
179 const Value *AllocaPtr, uint64_t AllocaSize);
180 bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
181 uint64_t AllocaSize);
182
183 bool ShouldInlinePointerAddress(CallInst &CI);
184 void TryInlinePointerAddress();
185
186public:
187 SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL,
188 DomTreeUpdater *DTU, ScalarEvolution &SE)
189 : F(F), TL(TL), DL(DL), DTU(DTU), SE(SE),
190 StackPtrTy(DL.getAllocaPtrType(F.getContext())),
191 IntPtrTy(DL.getIntPtrType(F.getContext())),
192 Int32Ty(Type::getInt32Ty(F.getContext())) {}
193
194 // Run the transformation on the associated function.
195 // Returns whether the function was changed.
196 bool run();
197};
198
199constexpr Align SafeStack::StackAlignment;
200
201uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
202 uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
203 if (AI->isArrayAllocation()) {
204 auto C = dyn_cast<ConstantInt>(AI->getArraySize());
205 if (!C)
206 return 0;
207 Size *= C->getZExtValue();
208 }
209 return Size;
210}
211
212bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
213 const Value *AllocaPtr, uint64_t AllocaSize) {
214 const SCEV *AddrExpr = SE.getSCEV(Addr);
215 const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AddrExpr));
216 if (!Base || Base->getValue() != AllocaPtr) {
217 LLVM_DEBUG(
218 dbgs() << "[SafeStack] "
219 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
220 << *AllocaPtr << "\n"
221 << "SCEV " << *AddrExpr << " not directly based on alloca\n");
222 return false;
223 }
224
225 const SCEV *Expr = SE.removePointerBase(AddrExpr);
226 uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
227 ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
228 ConstantRange SizeRange =
229 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
230 ConstantRange AccessRange = AccessStartRange.add(SizeRange);
231 ConstantRange AllocaRange =
232 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
233 bool Safe = AllocaRange.contains(AccessRange);
234
235 LLVM_DEBUG(
236 dbgs() << "[SafeStack] "
237 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
238 << *AllocaPtr << "\n"
239 << " Access " << *Addr << "\n"
240 << " SCEV " << *Expr
241 << " U: " << SE.getUnsignedRange(Expr)
242 << ", S: " << SE.getSignedRange(Expr) << "\n"
243 << " Range " << AccessRange << "\n"
244 << " AllocaRange " << AllocaRange << "\n"
245 << " " << (Safe ? "safe" : "unsafe") << "\n");
246
247 return Safe;
248}
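// Worked example of the range check above (editorial sketch): for a 16-byte
// alloca accessed by 4-byte loads at offset 4*i where SCEV proves i is in
// [0, 3], AccessStartRange is [0, 13), SizeRange is [0, 4), and so
// AccessRange = [0, 13) + [0, 4) = [0, 16). AllocaRange is [0, 16), which
// contains AccessRange, so the access is safe. If i could also reach 4, the
// access range would grow to [0, 20) and the alloca would be flagged unsafe.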
249
250bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
251 const Value *AllocaPtr,
252 uint64_t AllocaSize) {
253 if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
254 if (MTI->getRawSource() != U && MTI->getRawDest() != U)
255 return true;
256 } else {
257 if (MI->getRawDest() != U)
258 return true;
259 }
260
261 auto Len = MI->getLengthInBytes();
262 // Non-constant size => unsafe. FIXME: try SCEV getRange.
263 if (!Len) return false;
264 return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
265}
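// Editorial example: a `memset(p, 0, 16)` over a 16-byte alloca is accepted
// because the constant length lets IsAccessSafe prove the write stays inside
// [0, 16); a `memset(p, 0, n)` with a non-constant n is conservatively
// rejected by the early return above.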
266
267/// Check whether a given allocation must be put on the safe
268/// stack or not. The function analyzes all uses of AI and checks whether it is
269/// only accessed in a memory safe way (as decided statically).
270bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
271 // Go through all uses of this alloca and check whether all accesses to the
272 // allocated object are statically known to be memory safe and, hence, the
273 // object can be placed on the safe stack.
274 SmallPtrSet<const Value *, 16> Visited;
275 SmallVector<const Value *, 8> WorkList;
276 WorkList.push_back(AllocaPtr);
277
278 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
279 while (!WorkList.empty()) {
280 const Value *V = WorkList.pop_back_val();
281 for (const Use &UI : V->uses()) {
282 auto I = cast<const Instruction>(UI.getUser());
283 assert(V == UI.get());
284
285 switch (I->getOpcode()) {
286 case Instruction::Load:
287 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
288 AllocaSize))
289 return false;
290 break;
291
292 case Instruction::VAArg:
293 // "va-arg" from a pointer is safe.
294 break;
295 case Instruction::Store:
296 if (V == I->getOperand(0)) {
297 // Stored the pointer - conservatively assume it may be unsafe.
298 LLVM_DEBUG(dbgs()
299 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
300 << "\n store of address: " << *I << "\n");
301 return false;
302 }
303
304 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
305 AllocaPtr, AllocaSize))
306 return false;
307 break;
308
309 case Instruction::Ret:
310 // Information leak.
311 return false;
312
313 case Instruction::Call:
314 case Instruction::Invoke: {
315 const CallBase &CS = *cast<CallBase>(I);
316
317 if (I->isLifetimeStartOrEnd())
318 continue;
319
320 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
321 if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
322 LLVM_DEBUG(dbgs()
323 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
324 << "\n unsafe memintrinsic: " << *I << "\n");
325 return false;
326 }
327 continue;
328 }
329
330 // LLVM 'nocapture' attribute is only set for arguments whose address
331 // is not stored, passed around, or used in any other non-trivial way.
332 // We assume that passing a pointer to an object as a 'nocapture
333 // readnone' argument is safe.
334 // FIXME: a more precise solution would require an interprocedural
335 // analysis here, which would look at all uses of an argument inside
336 // the function being called.
337 auto B = CS.arg_begin(), E = CS.arg_end();
338 for (const auto *A = B; A != E; ++A)
339 if (A->get() == V)
340 if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
341 CS.doesNotAccessMemory()))) {
342 LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
343 << "\n unsafe call: " << *I << "\n");
344 return false;
345 }
346 continue;
347 }
348
349 default:
350 if (Visited.insert(I).second)
351 WorkList.push_back(cast<const Instruction>(I));
352 }
353 }
354 }
355
356 // All uses of the alloca are safe, we can place it on the safe stack.
357 return true;
358}
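// Editorial IR-level example of a rejected alloca (illustrative names): the
// pointer itself being stored makes the object unsafe regardless of how it is
// otherwise used:
//
//   %p = alloca i32
//   store ptr %p, ptr @some_global   ; Instruction::Store of the address
//
// IsSafeStackAlloca returns false here, so %p is placed on the unsafe stack.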
359
360Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
361 Value *StackGuardVar = TL.getIRStackGuard(IRB);
362 Module *M = F.getParent();
363
364 if (!StackGuardVar) {
365 TL.insertSSPDeclarations(*M);
366 return IRB.CreateIntrinsic(Intrinsic::stackguard, {});
367 }
368
369 return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
370}
371
372void SafeStack::findInsts(Function &F,
373 SmallVectorImpl<AllocaInst *> &StaticAllocas,
374 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
375 SmallVectorImpl<Argument *> &ByValArguments,
376 SmallVectorImpl<Instruction *> &Returns,
377 SmallVectorImpl<Instruction *> &StackRestorePoints) {
378 for (Instruction &I : instructions(&F)) {
379 if (auto AI = dyn_cast<AllocaInst>(&I)) {
380 ++NumAllocas;
381
382 uint64_t Size = getStaticAllocaAllocationSize(AI);
383 if (IsSafeStackAlloca(AI, Size))
384 continue;
385
386 if (AI->isStaticAlloca()) {
387 ++NumUnsafeStaticAllocas;
388 StaticAllocas.push_back(AI);
389 } else {
390 ++NumUnsafeDynamicAllocas;
391 DynamicAllocas.push_back(AI);
392 }
393 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
394 if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
395 Returns.push_back(CI);
396 else
397 Returns.push_back(RI);
398 } else if (auto CI = dyn_cast<CallInst>(&I)) {
399 // setjmps require stack restore.
400 if (CI->getCalledFunction() && CI->canReturnTwice())
401 StackRestorePoints.push_back(CI);
402 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
403 // Exception landing pads require stack restore.
404 StackRestorePoints.push_back(LP);
405 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
406 if (II->getIntrinsicID() == Intrinsic::gcroot)
407 report_fatal_error(
408 "gcroot intrinsic not compatible with safestack attribute");
409 }
410 }
411 for (Argument &Arg : F.args()) {
412 if (!Arg.hasByValAttr())
413 continue;
414 uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
415 if (IsSafeStackAlloca(&Arg, Size))
416 continue;
417
418 ++NumUnsafeByValArguments;
419 ByValArguments.push_back(&Arg);
420 }
421}
422
423AllocaInst *
424SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
425 ArrayRef<Instruction *> StackRestorePoints,
426 Value *StaticTop, bool NeedDynamicTop) {
427 assert(StaticTop && "The stack top isn't set.");
428
429 if (StackRestorePoints.empty())
430 return nullptr;
431
432 // We need the current value of the shadow stack pointer to restore
433 // after longjmp or exception catching.
434
435 // FIXME: On some platforms this could be handled by the longjmp/exception
436 // runtime itself.
437
438 AllocaInst *DynamicTop = nullptr;
439 if (NeedDynamicTop) {
440 // If we also have dynamic alloca's, the stack pointer value changes
441 // throughout the function. For now we store it in an alloca.
442 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
443 "unsafe_stack_dynamic_ptr");
444 IRB.CreateStore(StaticTop, DynamicTop);
445 }
446
447 // Restore current stack pointer after longjmp/exception catch.
448 for (Instruction *I : StackRestorePoints) {
449 ++NumUnsafeStackRestorePoints;
450
451 IRB.SetInsertPoint(I->getNextNode());
452 Value *CurrentTop =
453 DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
454 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
455 }
456
457 return DynamicTop;
458}
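// Rough shape of the emitted restore code (editorial sketch, illustrative IR
// names): after each restore point, e.g. a returns_twice call such as setjmp,
// the unsafe stack pointer location is reset to the last known top:
//
//   %r = call i32 @setjmp(ptr %env)                  ; restore point
//   %top = load ptr, ptr %unsafe_stack_dynamic_ptr   ; or the static top value
//   store ptr %top, ptr %unsafe_stack_ptr_addr       ; the UnsafeStackPtr slot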
459
460void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
461 AllocaInst *StackGuardSlot, Value *StackGuard) {
462 Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
463 Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
464
465 auto SuccessProb = BranchProbability::getBranchProbStackProtector(true);
466 auto FailureProb = BranchProbability::getBranchProbStackProtector(false);
467 MDNode *Weights = MDBuilder(F.getContext())
468 .createBranchWeights(SuccessProb.getNumerator(),
469 FailureProb.getNumerator());
470 Instruction *CheckTerm =
471 SplitBlockAndInsertIfThen(Cmp, &RI, /* Unreachable */ true, Weights, DTU);
472 IRBuilder<> IRBFail(CheckTerm);
473 // FIXME: respect -fsanitize-trap / -ftrap-function here?
474 const char *StackChkFailName =
475 TL.getLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL);
476 if (!StackChkFailName) {
477 F.getContext().emitError(
478 "no libcall available for stackprotector check fail");
479 return;
480 }
481
482 FunctionCallee StackChkFail =
483 F.getParent()->getOrInsertFunction(StackChkFailName, IRB.getVoidTy());
484 IRBFail.CreateCall(StackChkFail, {});
485}
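// Shape of the emitted guard check (editorial sketch with illustrative IR
// names; the libcall is typically __stack_chk_fail): before the return, the
// slot is reloaded and compared against the original guard value, with the
// failing path split into a cold block that never returns:
//
//   %g = load ptr, ptr %StackGuardSlot
//   %bad = icmp ne ptr %StackGuard, %g
//   br i1 %bad, label %fail, label %cont   ; weighted as highly unlikely
// fail:
//   call void @__stack_chk_fail()
//   unreachable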
486
487/// We explicitly compute and set the unsafe stack layout for all unsafe
488/// static alloca instructions. We save the unsafe "base pointer" in the
489/// prologue into a local variable and restore it in the epilogue.
490Value *SafeStack::moveStaticAllocasToUnsafeStack(
491 IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
492 ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
493 AllocaInst *StackGuardSlot) {
494 if (StaticAllocas.empty() && ByValArguments.empty())
495 return BasePointer;
496
497 DIBuilder DIB(*F.getParent());
498
499 StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
500 static const StackLifetime::LiveRange NoColoringRange(1, true);
501 if (ClColoring)
502 SSC.run();
503
504 for (const auto *I : SSC.getMarkers()) {
505 auto *Op = dyn_cast<Instruction>(I->getOperand(1));
506 const_cast<IntrinsicInst *>(I)->eraseFromParent();
507 // Remove the operand bitcast, too, if it has no more uses left.
508 if (Op && Op->use_empty())
509 Op->eraseFromParent();
510 }
511
512 // Unsafe stack always grows down.
513 StackLayout SSL(StackAlignment);
514 if (StackGuardSlot) {
515 Type *Ty = StackGuardSlot->getAllocatedType();
516 Align Align = std::max(DL.getPrefTypeAlign(Ty), StackGuardSlot->getAlign());
517 SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
518 Align, SSC.getFullLiveRange());
519 }
520
521 for (Argument *Arg : ByValArguments) {
522 Type *Ty = Arg->getParamByValType();
523 uint64_t Size = DL.getTypeStoreSize(Ty);
524 if (Size == 0)
525 Size = 1; // Don't create zero-sized stack objects.
526
527 // Ensure the object is properly aligned.
528 Align Align = DL.getPrefTypeAlign(Ty);
529 if (auto A = Arg->getParamAlign())
530 Align = std::max(Align, *A);
531 SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
532 }
533
534 for (AllocaInst *AI : StaticAllocas) {
535 Type *Ty = AI->getAllocatedType();
536 uint64_t Size = getStaticAllocaAllocationSize(AI);
537 if (Size == 0)
538 Size = 1; // Don't create zero-sized stack objects.
539
540 // Ensure the object is properly aligned.
541 Align Align = std::max(DL.getPrefTypeAlign(Ty), AI->getAlign());
542
543 SSL.addObject(AI, Size, Align,
544 ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
545 }
546
547 SSL.computeLayout();
548 Align FrameAlignment = SSL.getFrameAlignment();
549
550 // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location
551 // (AlignmentSkew).
552 if (FrameAlignment > StackAlignment) {
553 // Re-align the base pointer according to the max requested alignment.
554 IRB.SetInsertPoint(BasePointer->getNextNode());
555 BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
556 IRB.CreateAnd(
557 IRB.CreatePtrToInt(BasePointer, IntPtrTy),
558 ConstantInt::get(IntPtrTy, ~(FrameAlignment.value() - 1))),
559 StackPtrTy));
560 }
561
562 IRB.SetInsertPoint(BasePointer->getNextNode());
563
564 if (StackGuardSlot) {
565 unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
566 Value *Off =
567 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
568 Value *NewAI =
569 IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");
570
571 // Replace alloc with the new location.
572 StackGuardSlot->replaceAllUsesWith(NewAI);
573 StackGuardSlot->eraseFromParent();
574 }
575
576 for (Argument *Arg : ByValArguments) {
577 unsigned Offset = SSL.getObjectOffset(Arg);
578 MaybeAlign Align(SSL.getObjectAlignment(Arg));
579 Type *Ty = Arg->getParamByValType();
580
581 uint64_t Size = DL.getTypeStoreSize(Ty);
582 if (Size == 0)
583 Size = 1; // Don't create zero-sized stack objects.
584
585 Value *Off =
586 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
587 Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
588 Arg->getName() + ".unsafe-byval");
589
590 // Replace alloc with the new location.
591 replaceDbgDeclare(Arg, BasePointer, DIB, DIExpression::ApplyOffset,
592 -Offset);
593 Arg->replaceAllUsesWith(NewArg);
595 IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
596 }
597
598 // Allocate space for every unsafe static AllocaInst on the unsafe stack.
599 for (AllocaInst *AI : StaticAllocas) {
600 IRB.SetInsertPoint(AI);
601 unsigned Offset = SSL.getObjectOffset(AI);
602
603 replaceDbgDeclare(AI, BasePointer, DIB, DIExpression::ApplyOffset, -Offset);
604 replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
605
606 // Replace uses of the alloca with the new location.
607 // Insert address calculation close to each use to work around PR27844.
608 std::string Name = std::string(AI->getName()) + ".unsafe";
609 while (!AI->use_empty()) {
610 Use &U = *AI->use_begin();
611 Instruction *User = cast<Instruction>(U.getUser());
612
613 // Drop lifetime markers now that this is no longer an alloca.
614 // SafeStack has already performed its own stack coloring.
615 if (User->isLifetimeStartOrEnd()) {
616 User->eraseFromParent();
617 continue;
618 }
619
620 Instruction *InsertBefore;
621 if (auto *PHI = dyn_cast<PHINode>(User))
622 InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
623 else
624 InsertBefore = User;
625
626 IRBuilder<> IRBUser(InsertBefore);
627 Value *Off =
628 IRBUser.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
629 Value *Replacement =
630 IRBUser.CreateAddrSpaceCast(Off, AI->getType(), Name);
631
632 if (auto *PHI = dyn_cast<PHINode>(User))
633 // PHI nodes may have multiple incoming edges from the same BB (why??),
634 // all must be updated at once with the same incoming value.
635 PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
636 else
637 U.set(Replacement);
638 }
639
640 AI->eraseFromParent();
641 }
642
643 // Re-align BasePointer so that our callees would see it aligned as
644 // expected.
645 // FIXME: no need to update BasePointer in leaf functions.
646 unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
647
648 MDBuilder MDB(F.getContext());
649 SmallVector<Metadata *, 2> Data;
650 Data.push_back(MDB.createString("unsafe-stack-size"));
651 Data.push_back(MDB.createConstant(ConstantInt::get(Int32Ty, FrameSize)));
652 MDNode *MD = MDTuple::get(F.getContext(), Data);
653 F.setMetadata(LLVMContext::MD_annotation, MD);
654
655 // Update shadow stack pointer in the function epilogue.
656 IRB.SetInsertPoint(BasePointer->getNextNode());
657
658 Value *StaticTop =
659 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
660 "unsafe_stack_static_top");
661 IRB.CreateStore(StaticTop, UnsafeStackPtr);
662 return StaticTop;
663}
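// Net effect on the prologue (editorial sketch, illustrative IR names and
// offsets): each unsafe static object becomes a fixed negative offset from the
// saved base pointer, and the stored top is bumped down by the aligned frame
// size:
//
//   %unsafe_stack_ptr = load ptr, ptr %unsafe_stack_ptr_addr
//   %buf.unsafe = getelementptr i8, ptr %unsafe_stack_ptr, i32 -16
//   %unsafe_stack_static_top = getelementptr i8, ptr %unsafe_stack_ptr, i32 -32
//   store ptr %unsafe_stack_static_top, ptr %unsafe_stack_ptr_addr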
664
665void SafeStack::moveDynamicAllocasToUnsafeStack(
666 Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
667 ArrayRef<AllocaInst *> DynamicAllocas) {
668 DIBuilder DIB(*F.getParent());
669
670 for (AllocaInst *AI : DynamicAllocas) {
671 IRBuilder<> IRB(AI);
672
673 // Compute the new SP value (after AI).
674 Value *ArraySize = AI->getArraySize();
675 if (ArraySize->getType() != IntPtrTy)
676 ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
677
678 Type *Ty = AI->getAllocatedType();
679 uint64_t TySize = DL.getTypeAllocSize(Ty);
680 Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
681
682 Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
683 IntPtrTy);
684 SP = IRB.CreateSub(SP, Size);
685
686 // Align the SP value to satisfy the AllocaInst, type and stack alignments.
687 auto Align = std::max(std::max(DL.getPrefTypeAlign(Ty), AI->getAlign()),
688 StackAlignment);
689
690 Value *NewTop = IRB.CreateIntToPtr(
691 IRB.CreateAnd(SP,
692 ConstantInt::get(IntPtrTy, ~uint64_t(Align.value() - 1))),
693 StackPtrTy);
694
695 // Save the stack pointer.
696 IRB.CreateStore(NewTop, UnsafeStackPtr);
697 if (DynamicTop)
698 IRB.CreateStore(NewTop, DynamicTop);
699
700 Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
701 if (AI->hasName() && isa<Instruction>(NewAI))
702 NewAI->takeName(AI);
703
705 AI->replaceAllUsesWith(NewAI);
706 AI->eraseFromParent();
707 }
708
709 if (!DynamicAllocas.empty()) {
710 // Now go through the instructions again, replacing stacksave/stackrestore.
711 for (Instruction &I : llvm::make_early_inc_range(instructions(&F))) {
712 auto *II = dyn_cast<IntrinsicInst>(&I);
713 if (!II)
714 continue;
715
716 if (II->getIntrinsicID() == Intrinsic::stacksave) {
717 IRBuilder<> IRB(II);
718 Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
719 LI->takeName(II);
720 II->replaceAllUsesWith(LI);
721 II->eraseFromParent();
722 } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
723 IRBuilder<> IRB(II);
724 Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
725 SI->takeName(II);
726 assert(II->use_empty());
727 II->eraseFromParent();
728 }
729 }
730 }
731}
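// Rough lowering of one dynamic alloca (editorial sketch, illustrative IR
// names; assumes i32 elements and the 16-byte unsafe stack alignment):
//
//   %cur  = load ptr, ptr %unsafe_stack_ptr_addr
//   %sp   = ptrtoint ptr %cur to i64
//   %size = mul i64 %n, 4
//   %sub  = sub i64 %sp, %size
//   %aln  = and i64 %sub, -16                ; grow down, then re-align
//   %top  = inttoptr i64 %aln to ptr
//   store ptr %top, ptr %unsafe_stack_ptr_addr
//
// %top then replaces the original alloca's result.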
732
733bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
734 Function *Callee = CI.getCalledFunction();
735 if (CI.hasFnAttr(Attribute::AlwaysInline) &&
736 isInlineViable(*Callee).isSuccess())
737 return true;
738 if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
739 CI.isNoInline())
740 return false;
741 return true;
742}
743
744void SafeStack::TryInlinePointerAddress() {
745 auto *CI = dyn_cast<CallInst>(UnsafeStackPtr);
746 if (!CI)
747 return;
748
749 if(F.hasOptNone())
750 return;
751
752 Function *Callee = CI->getCalledFunction();
753 if (!Callee || Callee->isDeclaration())
754 return;
755
756 if (!ShouldInlinePointerAddress(*CI))
757 return;
758
759 InlineFunctionInfo IFI;
760 InlineFunction(*CI, IFI);
761}
762
763bool SafeStack::run() {
764 assert(F.hasFnAttribute(Attribute::SafeStack) &&
765 "Can't run SafeStack on a function without the attribute");
766 assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");
767
768 ++NumFunctions;
769
770 SmallVector<AllocaInst *, 16> StaticAllocas;
771 SmallVector<AllocaInst *, 4> DynamicAllocas;
772 SmallVector<Argument *, 4> ByValArguments;
773 SmallVector<Instruction *, 4> Returns;
774
775 // Collect all points where stack gets unwound and needs to be restored
776 // This is only necessary because the runtime (setjmp and unwind code) is
777 // not aware of the unsafe stack and won't unwind/restore it properly.
778 // To work around this problem without changing the runtime, we insert
779 // instrumentation to restore the unsafe stack pointer when necessary.
780 SmallVector<Instruction *, 4> StackRestorePoints;
781
782 // Find all static and dynamic alloca instructions that must be moved to the
783 // unsafe stack, all return instructions and stack restore points.
784 findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
785 StackRestorePoints);
786
787 if (StaticAllocas.empty() && DynamicAllocas.empty() &&
788 ByValArguments.empty() && StackRestorePoints.empty())
789 return false; // Nothing to do in this function.
790
791 if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
792 !ByValArguments.empty())
793 ++NumUnsafeStackFunctions; // This function has the unsafe stack.
794
795 if (!StackRestorePoints.empty())
796 ++NumUnsafeStackRestorePointsFunctions;
797
798 IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
799 // Calls must always have a debug location, or else inlining breaks. So
800 // we explicitly set an artificial debug location here.
801 if (DISubprogram *SP = F.getSubprogram())
802 IRB.SetCurrentDebugLocation(
803 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP));
804 if (SafeStackUsePointerAddress) {
805 const char *SafestackPointerAddressName =
806 TL.getLibcallName(RTLIB::SAFESTACK_POINTER_ADDRESS);
807 if (!SafestackPointerAddressName) {
808 F.getContext().emitError(
809 "no libcall available for safestack pointer address");
810 return false;
811 }
812
813 FunctionCallee Fn = F.getParent()->getOrInsertFunction(
814 SafestackPointerAddressName, IRB.getPtrTy(0));
815 UnsafeStackPtr = IRB.CreateCall(Fn);
816 } else {
817 UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);
818 }
819
820 // Load the current stack pointer (we'll also use it as a base pointer).
821 // FIXME: use a dedicated register for it ?
822 Instruction *BasePointer =
823 IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
824 assert(BasePointer->getType() == StackPtrTy);
825
826 AllocaInst *StackGuardSlot = nullptr;
827 // FIXME: implement weaker forms of stack protector.
828 if (F.hasFnAttribute(Attribute::StackProtect) ||
829 F.hasFnAttribute(Attribute::StackProtectStrong) ||
830 F.hasFnAttribute(Attribute::StackProtectReq)) {
831 Value *StackGuard = getStackGuard(IRB, F);
832 StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
833 IRB.CreateStore(StackGuard, StackGuardSlot);
834
835 for (Instruction *RI : Returns) {
836 IRBuilder<> IRBRet(RI);
837 checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
838 }
839 }
840
841 // The top of the unsafe stack after all unsafe static allocas are
842 // allocated.
843 Value *StaticTop = moveStaticAllocasToUnsafeStack(
844 IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);
845
846 // Safe stack object that stores the current unsafe stack top. It is updated
847 // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
848 // This is only needed if we need to restore stack pointer after longjmp
849 // or exceptions, and we have dynamic allocations.
850 // FIXME: a better alternative might be to store the unsafe stack pointer
851 // before setjmp / invoke instructions.
852 AllocaInst *DynamicTop = createStackRestorePoints(
853 IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
854
855 // Handle dynamic allocas.
856 moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
857 DynamicAllocas);
858
859 // Restore the unsafe stack pointer before each return.
860 for (Instruction *RI : Returns) {
861 IRB.SetInsertPoint(RI);
862 IRB.CreateStore(BasePointer, UnsafeStackPtr);
863 }
864
865 TryInlinePointerAddress();
866
867 LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
868 return true;
869}
870
871class SafeStackLegacyPass : public FunctionPass {
872 const TargetMachine *TM = nullptr;
873
874public:
875 static char ID; // Pass identification, replacement for typeid.
876
877 SafeStackLegacyPass() : FunctionPass(ID) {
878 initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
879 }
880
881 void getAnalysisUsage(AnalysisUsage &AU) const override {
882 AU.addRequired<TargetPassConfig>();
883 AU.addRequired<TargetLibraryInfoWrapperPass>();
884 AU.addRequired<AssumptionCacheTracker>();
885 AU.addPreserved<DominatorTreeWrapperPass>();
886 }
887
888 bool runOnFunction(Function &F) override {
889 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
890
891 if (!F.hasFnAttribute(Attribute::SafeStack)) {
892 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
893 " for this function\n");
894 return false;
895 }
896
897 if (F.isDeclaration()) {
898 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
899 " is not available\n");
900 return false;
901 }
902
903 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
904 auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
905 if (!TL)
906 report_fatal_error("TargetLowering instance is required");
907
908 auto *DL = &F.getDataLayout();
909 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
910 auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
911
912 // Compute DT and LI only for functions that have the attribute.
913 // This is only useful because the legacy pass manager doesn't let us
914 // compute analyses lazily.
915
916 DominatorTree *DT;
917 bool ShouldPreserveDominatorTree;
918 std::optional<DominatorTree> LazilyComputedDomTree;
919
920 // Do we already have a DominatorTree available from the previous pass?
921 // Note that we should *NOT* require it, to avoid the case where we end up
922 // not needing it, but the legacy PM would have computed it for us anyways.
923 if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
924 DT = &DTWP->getDomTree();
925 ShouldPreserveDominatorTree = true;
926 } else {
927 // Otherwise, we need to compute it.
928 LazilyComputedDomTree.emplace(F);
929 DT = &*LazilyComputedDomTree;
930 ShouldPreserveDominatorTree = false;
931 }
932
933 // Likewise, lazily compute loop info.
934 LoopInfo LI(*DT);
935
936 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
937
938 ScalarEvolution SE(F, TLI, ACT, *DT, LI);
939
940 return SafeStack(F, *TL, *DL, ShouldPreserveDominatorTree ? &DTU : nullptr,
941 SE)
942 .run();
943 }
944};
945
946} // end anonymous namespace
947
948 PreservedAnalyses SafeStackPass::run(Function &F,
949 FunctionAnalysisManager &FAM) {
950 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
951
952 if (!F.hasFnAttribute(Attribute::SafeStack)) {
953 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
954 " for this function\n");
955 return PreservedAnalyses::all();
956 }
957
958 if (F.isDeclaration()) {
959 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
960 " is not available\n");
961 return PreservedAnalyses::all();
962 }
963
964 auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
965 if (!TL)
966 report_fatal_error("TargetLowering instance is required");
967
968 auto &DL = F.getDataLayout();
969
970 // preserve DominatorTree
971 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
972 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
973 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
974
975 bool Changed = SafeStack(F, *TL, DL, &DTU, SE).run();
976
977 if (!Changed)
978 return PreservedAnalyses::all();
979 PreservedAnalyses PA;
980 PA.preserve<DominatorTreeAnalysis>();
981 return PA;
982}
983
984char SafeStackLegacyPass::ID = 0;
985
986 INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
987 "Safe Stack instrumentation pass", false, false)
988 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
989 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
990 INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
991 "Safe Stack instrumentation pass", false, false)
992
993FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }