LLVM 23.0.0git
SafeStack.cpp
Go to the documentation of this file.
1//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10// and the unsafe stack (explicitly allocated and managed through the runtime
11// support library).
12//
13// http://clang.llvm.org/docs/SafeStack.html
14//
15//===----------------------------------------------------------------------===//
16
18#include "SafeStackLayout.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/Statistic.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DIBuilder.h"
41#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/MDBuilder.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Use.h"
56#include "llvm/IR/Value.h"
58#include "llvm/Pass.h"
60#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <optional>
71#include <string>
72
73using namespace llvm;
74using namespace llvm::safestack;
75
76#define DEBUG_TYPE "safe-stack"
77
// Pass-level statistics, reported with -stats.
STATISTIC(NumFunctions, "Total number of functions");
STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
STATISTIC(NumUnsafeStackRestorePointsFunctions,
          "Number of functions that use setjmp or exceptions");

STATISTIC(NumAllocas, "Total number of allocas");
STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");

/// Use __safestack_pointer_address even if the platform has a faster way of
/// access safe stack pointer.
static cl::opt<bool>
    SafeStackUsePointerAddress("safestack-use-pointer-address",
                               cl::init(false), cl::Hidden);

// Switch controlling lifetime-based slot coloring of unsafe-stack objects
// (enabled by default; see moveStaticAllocasToUnsafeStack).
static cl::opt<bool> ClColoring("safe-stack-coloring",
                                cl::desc("enable safe stack coloring"),
                                cl::Hidden, cl::init(true));
98
99namespace {
100
101/// The SafeStack pass splits the stack of each function into the safe
102/// stack, which is only accessed through memory safe dereferences (as
103/// determined statically), and the unsafe stack, which contains all
104/// local variables that are accessed in ways that we can't prove to
105/// be safe.
106class SafeStack {
107 Function &F;
108 const TargetLoweringBase &TL;
109 const LibcallLoweringInfo &Libcalls;
110 const DataLayout &DL;
111 DomTreeUpdater *DTU;
112 ScalarEvolution &SE;
113
114 Type *StackPtrTy;
115 Type *AddrTy;
116 Type *Int32Ty;
117
118 Value *UnsafeStackPtr = nullptr;
119
120 /// Unsafe stack alignment. Each stack frame must ensure that the stack is
121 /// aligned to this value. We need to re-align the unsafe stack if the
122 /// alignment of any object on the stack exceeds this value.
123 ///
124 /// 16 seems like a reasonable upper bound on the alignment of objects that we
125 /// might expect to appear on the stack on most common targets.
126 static constexpr Align StackAlignment = Align::Constant<16>();
127
128 /// Return the value of the stack canary.
130
131 /// Load stack guard from the frame and check if it has changed.
132 void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
133 AllocaInst *StackGuardSlot, Value *StackGuard);
134
135 /// Find all static allocas, dynamic allocas, return instructions and
136 /// stack restore points (exception unwind blocks and setjmp calls) in the
137 /// given function and append them to the respective vectors.
138 void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
139 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
140 SmallVectorImpl<Argument *> &ByValArguments,
142 SmallVectorImpl<Instruction *> &StackRestorePoints);
143
144 /// Calculate the allocation size of a given alloca. Returns 0 if the
145 /// size can not be statically determined.
146 uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
147
148 /// Allocate space for all static allocas in \p StaticAllocas,
149 /// replace allocas with pointers into the unsafe stack.
150 ///
151 /// \returns A pointer to the top of the unsafe stack after all unsafe static
152 /// allocas are allocated.
153 Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
154 ArrayRef<AllocaInst *> StaticAllocas,
155 ArrayRef<Argument *> ByValArguments,
156 Instruction *BasePointer,
157 AllocaInst *StackGuardSlot);
158
159 /// Generate code to restore the stack after all stack restore points
160 /// in \p StackRestorePoints.
161 ///
162 /// \returns A local variable in which to maintain the dynamic top of the
163 /// unsafe stack if needed.
164 AllocaInst *
165 createStackRestorePoints(IRBuilder<> &IRB, Function &F,
166 ArrayRef<Instruction *> StackRestorePoints,
167 Value *StaticTop, bool NeedDynamicTop);
168
169 /// Replace all allocas in \p DynamicAllocas with code to allocate
170 /// space dynamically on the unsafe stack and store the dynamic unsafe stack
171 /// top to \p DynamicTop if non-null.
172 void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
173 AllocaInst *DynamicTop,
174 ArrayRef<AllocaInst *> DynamicAllocas);
175
176 bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);
177
178 bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
179 const Value *AllocaPtr, uint64_t AllocaSize);
180 bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
181 uint64_t AllocaSize);
182
183 bool ShouldInlinePointerAddress(CallInst &CI);
184 void TryInlinePointerAddress();
185
186public:
187 SafeStack(Function &F, const TargetLoweringBase &TL,
188 const LibcallLoweringInfo &Libcalls, const DataLayout &DL,
190 : F(F), TL(TL), Libcalls(Libcalls), DL(DL), DTU(DTU), SE(SE),
191 StackPtrTy(DL.getAllocaPtrType(F.getContext())),
192 AddrTy(DL.getAddressType(StackPtrTy)),
193 Int32Ty(Type::getInt32Ty(F.getContext())) {}
194
195 // Run the transformation on the associated function.
196 // Returns whether the function was changed.
197 bool run();
198};
199
200uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
201 if (auto Size = AI->getAllocationSize(DL))
202 if (Size->isFixed())
203 return Size->getFixedValue();
204 return 0;
205}
206
207bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
208 const Value *AllocaPtr, uint64_t AllocaSize) {
209 const SCEV *AddrExpr = SE.getSCEV(Addr);
210 const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AddrExpr));
211 if (!Base || Base->getValue() != AllocaPtr) {
213 dbgs() << "[SafeStack] "
214 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
215 << *AllocaPtr << "\n"
216 << "SCEV " << *AddrExpr << " not directly based on alloca\n");
217 return false;
218 }
219
220 const SCEV *Expr = SE.removePointerBase(AddrExpr);
221 uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
222 ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
223 ConstantRange SizeRange =
224 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
225 ConstantRange AccessRange = AccessStartRange.add(SizeRange);
226 ConstantRange AllocaRange =
227 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
228 bool Safe = AllocaRange.contains(AccessRange);
229
231 dbgs() << "[SafeStack] "
232 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
233 << *AllocaPtr << "\n"
234 << " Access " << *Addr << "\n"
235 << " SCEV " << *Expr
236 << " U: " << SE.getUnsignedRange(Expr)
237 << ", S: " << SE.getSignedRange(Expr) << "\n"
238 << " Range " << AccessRange << "\n"
239 << " AllocaRange " << AllocaRange << "\n"
240 << " " << (Safe ? "safe" : "unsafe") << "\n");
241
242 return Safe;
243}
244
245bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
246 const Value *AllocaPtr,
247 uint64_t AllocaSize) {
248 if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
249 if (MTI->getRawSource() != U && MTI->getRawDest() != U)
250 return true;
251 } else {
252 if (MI->getRawDest() != U)
253 return true;
254 }
255
256 auto Len = MI->getLengthInBytes();
257 // Non-constant size => unsafe. FIXME: try SCEV getRange.
258 if (!Len) return false;
259 return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
260}
261
262/// Check whether a given allocation must be put on the safe
263/// stack or not. The function analyzes all uses of AI and checks whether it is
264/// only accessed in a memory safe way (as decided statically).
265bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
266 // Go through all uses of this alloca and check whether all accesses to the
267 // allocated object are statically known to be memory safe and, hence, the
268 // object can be placed on the safe stack.
269 SmallPtrSet<const Value *, 16> Visited;
270 SmallVector<const Value *, 8> WorkList;
271 WorkList.push_back(AllocaPtr);
272
273 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
274 while (!WorkList.empty()) {
275 const Value *V = WorkList.pop_back_val();
276 for (const Use &UI : V->uses()) {
277 auto I = cast<const Instruction>(UI.getUser());
278 assert(V == UI.get());
279
280 switch (I->getOpcode()) {
281 case Instruction::Load:
282 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
283 AllocaSize))
284 return false;
285 break;
286
287 case Instruction::VAArg:
288 // "va-arg" from a pointer is safe.
289 break;
290 case Instruction::Store:
291 if (V == I->getOperand(0)) {
292 // Stored the pointer - conservatively assume it may be unsafe.
294 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
295 << "\n store of address: " << *I << "\n");
296 return false;
297 }
298
299 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
300 AllocaPtr, AllocaSize))
301 return false;
302 break;
303
304 case Instruction::Ret:
305 // Information leak.
306 return false;
307
308 case Instruction::Call:
309 case Instruction::Invoke: {
310 const CallBase &CS = *cast<CallBase>(I);
311
312 if (I->isLifetimeStartOrEnd())
313 continue;
314
315 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
316 if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
318 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
319 << "\n unsafe memintrinsic: " << *I << "\n");
320 return false;
321 }
322 continue;
323 }
324
325 // LLVM 'nocapture' attribute is only set for arguments whose address
326 // is not stored, passed around, or used in any other non-trivial way.
327 // We assume that passing a pointer to an object as a 'nocapture
328 // readnone' argument is safe.
329 // FIXME: a more precise solution would require an interprocedural
330 // analysis here, which would look at all uses of an argument inside
331 // the function being called.
332 auto B = CS.arg_begin(), E = CS.arg_end();
333 for (const auto *A = B; A != E; ++A)
334 if (A->get() == V)
335 if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
336 CS.doesNotAccessMemory()))) {
337 LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
338 << "\n unsafe call: " << *I << "\n");
339 return false;
340 }
341 continue;
342 }
343
344 default:
345 if (Visited.insert(I).second)
347 }
348 }
349 }
350
351 // All uses of the alloca are safe, we can place it on the safe stack.
352 return true;
353}
354
355Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
356 Value *StackGuardVar = TL.getIRStackGuard(IRB, Libcalls);
357 Module *M = F.getParent();
358
359 if (!StackGuardVar) {
360 TL.insertSSPDeclarations(*M, Libcalls);
361 return IRB.CreateIntrinsic(Intrinsic::stackguard, {});
362 }
363
364 return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
365}
366
367void SafeStack::findInsts(Function &F,
368 SmallVectorImpl<AllocaInst *> &StaticAllocas,
369 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
370 SmallVectorImpl<Argument *> &ByValArguments,
371 SmallVectorImpl<Instruction *> &Returns,
372 SmallVectorImpl<Instruction *> &StackRestorePoints) {
373 for (Instruction &I : instructions(&F)) {
374 if (auto AI = dyn_cast<AllocaInst>(&I)) {
375 ++NumAllocas;
376
377 uint64_t Size = getStaticAllocaAllocationSize(AI);
378 if (IsSafeStackAlloca(AI, Size))
379 continue;
380
381 if (AI->isStaticAlloca()) {
382 ++NumUnsafeStaticAllocas;
383 StaticAllocas.push_back(AI);
384 } else {
385 ++NumUnsafeDynamicAllocas;
386 DynamicAllocas.push_back(AI);
387 }
388 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
389 if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
390 Returns.push_back(CI);
391 else
392 Returns.push_back(RI);
393 } else if (auto CI = dyn_cast<CallInst>(&I)) {
394 // setjmps require stack restore.
395 if (CI->getCalledFunction() && CI->canReturnTwice())
396 StackRestorePoints.push_back(CI);
397 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
398 // Exception landing pads require stack restore.
399 StackRestorePoints.push_back(LP);
400 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
401 if (II->getIntrinsicID() == Intrinsic::gcroot)
403 "gcroot intrinsic not compatible with safestack attribute");
404 }
405 }
406 for (Argument &Arg : F.args()) {
407 if (!Arg.hasByValAttr())
408 continue;
409 uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
410 if (IsSafeStackAlloca(&Arg, Size))
411 continue;
412
413 ++NumUnsafeByValArguments;
414 ByValArguments.push_back(&Arg);
415 }
416}
417
418AllocaInst *
419SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
420 ArrayRef<Instruction *> StackRestorePoints,
421 Value *StaticTop, bool NeedDynamicTop) {
422 assert(StaticTop && "The stack top isn't set.");
423
424 if (StackRestorePoints.empty())
425 return nullptr;
426
427 // We need the current value of the shadow stack pointer to restore
428 // after longjmp or exception catching.
429
430 // FIXME: On some platforms this could be handled by the longjmp/exception
431 // runtime itself.
432
433 AllocaInst *DynamicTop = nullptr;
434 if (NeedDynamicTop) {
435 // If we also have dynamic alloca's, the stack pointer value changes
436 // throughout the function. For now we store it in an alloca.
437 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
438 "unsafe_stack_dynamic_ptr");
439 IRB.CreateStore(StaticTop, DynamicTop);
440 }
441
442 // Restore current stack pointer after longjmp/exception catch.
443 for (Instruction *I : StackRestorePoints) {
444 ++NumUnsafeStackRestorePoints;
445
446 IRB.SetInsertPoint(I->getNextNode());
447 Value *CurrentTop =
448 DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
449 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
450 }
451
452 return DynamicTop;
453}
454
/// Emit an inline canary check before the return instruction \p RI: reload
/// the guard from its slot, compare with the expected value, and call the
/// stack-check-fail libcall on an (unreachable-terminated) failure path.
void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
                                AllocaInst *StackGuardSlot, Value *StackGuard) {
  // Reload the canary from its unsafe-stack slot and compare.
  Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
  Value *Cmp = IRB.CreateICmpNE(StackGuard, V);

  // NOTE(review): the definitions of SuccessProb/FailureProb (the stack
  // protector branch probabilities) appear to be missing from this excerpt —
  // confirm against the full source.
  MDNode *Weights = MDBuilder(F.getContext())
                        .createBranchWeights(SuccessProb.getNumerator(),
                                             FailureProb.getNumerator());
  // Split the block so the "canary changed" path is a separate, weighted,
  // unreachable-terminated block.
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(Cmp, &RI, /* Unreachable */ true, Weights, DTU);
  IRBuilder<> IRBFail(CheckTerm);
  // FIXME: respect -fsanitize-trap / -ftrap-function here?
  RTLIB::LibcallImpl StackChkFailImpl =
      Libcalls.getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
  if (StackChkFailImpl == RTLIB::Unsupported) {
    F.getContext().emitError(
        "no libcall available for stackprotector check fail");
    return;
  }

  // NOTE(review): the initializer of StackChkFailName appears truncated in
  // this excerpt (presumably the libcall-implementation name lookup) —
  // confirm against the full source.
  StringRef StackChkFailName =

  FunctionCallee StackChkFail =
      F.getParent()->getOrInsertFunction(StackChkFailName, IRB.getVoidTy());
  IRBFail.CreateCall(StackChkFail, {});
}
484
/// We explicitly compute and set the unsafe stack layout for all unsafe
/// static alloca instructions. We save the unsafe "base pointer" in the
/// prologue into a local variable and restore it in the epilogue.
Value *SafeStack::moveStaticAllocasToUnsafeStack(
    IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
    ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
    AllocaInst *StackGuardSlot) {
  // Nothing to lay out: the base pointer is already the frame top.
  if (StaticAllocas.empty() && ByValArguments.empty())
    return BasePointer;

  DIBuilder DIB(*F.getParent());

  // Compute lifetime info so non-overlapping objects can share slots
  // (coloring); NoColoringRange marks everything live when coloring is off.
  StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
  static const StackLifetime::LiveRange NoColoringRange(1, true);
  if (ClColoring)
    SSC.run();

  // Lifetime markers are meaningless once objects leave the safe stack;
  // erase them (and any now-dead operand instruction).
  for (const auto *I : SSC.getMarkers()) {
    auto *Op = dyn_cast<Instruction>(I->getOperand(1));
    const_cast<IntrinsicInst *>(I)->eraseFromParent();
    // Remove the operand bitcast, too, if it has no more uses left.
    if (Op && Op->use_empty())
      Op->eraseFromParent();
  }

  // Unsafe stack always grows down.
  StackLayout SSL(StackAlignment);
  if (StackGuardSlot) {
    // The guard slot must stay live for the whole frame.
    SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
                  StackGuardSlot->getAlign(), SSC.getFullLiveRange());
  }

  for (Argument *Arg : ByValArguments) {
    Type *Ty = Arg->getParamByValType();
    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    Align Align = DL.getPrefTypeAlign(Ty);
    if (auto A = Arg->getParamAlign())
      Align = std::max(Align, *A);
    SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
  }

  for (AllocaInst *AI : StaticAllocas) {
    uint64_t Size = getStaticAllocaAllocationSize(AI);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    SSL.addObject(AI, Size, AI->getAlign(),
                  ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
  }

  SSL.computeLayout();
  Align FrameAlignment = SSL.getFrameAlignment();

  // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location
  // (AlignmentSkew).
  if (FrameAlignment > StackAlignment) {
    // Re-align the base pointer according to the max requested alignment.
    IRB.SetInsertPoint(BasePointer->getNextNode());
    BasePointer = IRB.CreateIntrinsic(
        StackPtrTy, Intrinsic::ptrmask,
        {BasePointer, ConstantInt::get(AddrTy, ~(FrameAlignment.value() - 1))});
  }

  IRB.SetInsertPoint(BasePointer->getNextNode());

  if (StackGuardSlot) {
    // Rewrite the guard slot to its assigned offset below the base pointer.
    unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
    Value *Off =
        IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
    Value *NewAI =
        IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");

    // Replace alloc with the new location.
    StackGuardSlot->replaceAllUsesWith(NewAI);
    StackGuardSlot->eraseFromParent();
  }

  for (Argument *Arg : ByValArguments) {
    unsigned Offset = SSL.getObjectOffset(Arg);
    MaybeAlign Align(SSL.getObjectAlignment(Arg));
    Type *Ty = Arg->getParamByValType();

    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    Value *Off =
        IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
    Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
                                      Arg->getName() + ".unsafe-byval");

    // Replace alloc with the new location.
    replaceDbgDeclare(Arg, BasePointer, DIB, DIExpression::ApplyOffset,
                      -Offset);
    Arg->replaceAllUsesWith(NewArg);
    // NOTE(review): a line appears to be missing from this excerpt here
    // (presumably repositioning the insert point after NewArg) — confirm
    // against the full source.
    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
  }

  // Allocate space for every unsafe static AllocaInst on the unsafe stack.
  for (AllocaInst *AI : StaticAllocas) {
    IRB.SetInsertPoint(AI);
    unsigned Offset = SSL.getObjectOffset(AI);

    replaceDbgDeclare(AI, BasePointer, DIB, DIExpression::ApplyOffset, -Offset);
    replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);

    // Replace uses of the alloca with the new location.
    // Insert address calculation close to each use to work around PR27844.
    std::string Name = std::string(AI->getName()) + ".unsafe";
    while (!AI->use_empty()) {
      Use &U = *AI->use_begin();
      Instruction *User = cast<Instruction>(U.getUser());

      // Drop lifetime markers now that this is no longer an alloca.
      // SafeStack has already performed its own stack coloring.
      if (User->isLifetimeStartOrEnd()) {
        User->eraseFromParent();
        continue;
      }

      Instruction *InsertBefore;
      if (auto *PHI = dyn_cast<PHINode>(User))
        InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
      else
        InsertBefore = User;

      IRBuilder<> IRBUser(InsertBefore);
      Value *Off =
          IRBUser.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
      Value *Replacement =
          IRBUser.CreateAddrSpaceCast(Off, AI->getType(), Name);

      if (auto *PHI = dyn_cast<PHINode>(User))
        // PHI nodes may have multiple incoming edges from the same BB (why??),
        // all must be updated at once with the same incoming value.
        PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
      else
        U.set(Replacement);
    }

    AI->eraseFromParent();
  }

  // Re-align BasePointer so that our callees would see it aligned as
  // expected.
  // FIXME: no need to update BasePointer in leaf functions.
  unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);

  // Record the unsafe frame size as "unsafe-stack-size" annotation metadata.
  MDBuilder MDB(F.getContext());
  // NOTE(review): the declaration of `Data` (a small vector of Metadata*)
  // appears to be missing from this excerpt — confirm against the full
  // source.
  Data.push_back(MDB.createString("unsafe-stack-size"));
  Data.push_back(MDB.createConstant(ConstantInt::get(Int32Ty, FrameSize)));
  MDNode *MD = MDTuple::get(F.getContext(), Data);
  F.setMetadata(LLVMContext::MD_annotation, MD);

  // Update shadow stack pointer in the function epilogue.
  IRB.SetInsertPoint(BasePointer->getNextNode());

  Value *StaticTop =
      IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
                       "unsafe_stack_static_top");
  IRB.CreateStore(StaticTop, UnsafeStackPtr);
  return StaticTop;
}
654
/// Rewrite each dynamic (variable-sized / non-entry) alloca as explicit
/// unsafe-stack pointer arithmetic, and retarget stacksave/stackrestore at
/// the unsafe stack pointer.
void SafeStack::moveDynamicAllocasToUnsafeStack(
    Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
    ArrayRef<AllocaInst *> DynamicAllocas) {
  DIBuilder DIB(*F.getParent());

  for (AllocaInst *AI : DynamicAllocas) {
    IRBuilder<> IRB(AI);

    // Compute the new SP value (after AI).
    Value *Size = IRB.CreateAllocationSize(AddrTy, AI);
    Value *SP = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
    SP = IRB.CreatePtrAdd(SP, IRB.CreateNeg(Size));

    // Align the SP value to satisfy the AllocaInst and stack alignments.
    auto Align = std::max(AI->getAlign(), StackAlignment);

    // Round down to the alignment by masking off the low address bits.
    Value *NewTop = IRB.CreateIntrinsic(
        StackPtrTy, Intrinsic::ptrmask,
        {SP, ConstantInt::getSigned(AddrTy, ~uint64_t(Align.value() - 1))});

    // Save the stack pointer.
    IRB.CreateStore(NewTop, UnsafeStackPtr);
    if (DynamicTop)
      // Keep the restore-point slot in sync with the moving top.
      IRB.CreateStore(NewTop, DynamicTop);

    Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
    if (AI->hasName() && isa<Instruction>(NewAI))
      NewAI->takeName(AI);

    // Replace all uses of the alloca and delete it.
    AI->replaceAllUsesWith(NewAI);
    AI->eraseFromParent();
  }

  if (!DynamicAllocas.empty()) {
    // Now go through the instructions again, replacing stacksave/stackrestore.
    for (Instruction &I : llvm::make_early_inc_range(instructions(&F))) {
      auto *II = dyn_cast<IntrinsicInst>(&I);
      if (!II)
        continue;

      if (II->getIntrinsicID() == Intrinsic::stacksave) {
        // stacksave becomes a load of the current unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
        LI->takeName(II);
        II->replaceAllUsesWith(LI);
        II->eraseFromParent();
      } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
        // stackrestore becomes a store back into the unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
        SI->takeName(II);
        assert(II->use_empty());
        II->eraseFromParent();
      }
    }
  }
}
712
713bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
715 if (CI.hasFnAttr(Attribute::AlwaysInline) &&
716 isInlineViable(*Callee).isSuccess())
717 return true;
718 if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
719 CI.isNoInline())
720 return false;
721 return true;
722}
723
724void SafeStack::TryInlinePointerAddress() {
725 auto *CI = dyn_cast<CallInst>(UnsafeStackPtr);
726 if (!CI)
727 return;
728
729 if(F.hasOptNone())
730 return;
731
733 if (!Callee || Callee->isDeclaration())
734 return;
735
736 if (!ShouldInlinePointerAddress(*CI))
737 return;
738
739 InlineFunctionInfo IFI;
740 InlineFunction(*CI, IFI);
741}
742
/// Main driver: split this function's stack into safe and unsafe parts.
/// Returns true if any IR was changed.
bool SafeStack::run() {
  assert(F.hasFnAttribute(Attribute::SafeStack) &&
         "Can't run SafeStack on a function without the attribute");
  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");

  ++NumFunctions;

  SmallVector<AllocaInst *, 16> StaticAllocas;
  SmallVector<AllocaInst *, 4> DynamicAllocas;
  SmallVector<Argument *, 4> ByValArguments;
  SmallVector<Instruction *, 4> Returns;

  // Collect all points where stack gets unwound and needs to be restored
  // This is only necessary because the runtime (setjmp and unwind code) is
  // not aware of the unsafe stack and won't unwind/restore it properly.
  // To work around this problem without changing the runtime, we insert
  // instrumentation to restore the unsafe stack pointer when necessary.
  SmallVector<Instruction *, 4> StackRestorePoints;

  // Find all static and dynamic alloca instructions that must be moved to the
  // unsafe stack, all return instructions and stack restore points.
  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
            StackRestorePoints);

  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
      ByValArguments.empty() && StackRestorePoints.empty())
    return false; // Nothing to do in this function.

  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
      !ByValArguments.empty())
    ++NumUnsafeStackFunctions; // This function has the unsafe stack.

  if (!StackRestorePoints.empty())
    ++NumUnsafeStackRestorePointsFunctions;

  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
  // Calls must always have a debug location, or else inlining breaks. So
  // we explicitly set a artificial debug location here.
  if (DISubprogram *SP = F.getSubprogram())
    // NOTE(review): the wrapping call (presumably
    // IRB.SetCurrentDebugLocation(...)) appears truncated in this excerpt —
    // confirm against the full source.
        DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP));

  // FIXME: A more correct implementation of SafeStackUsePointerAddress would
  // change the libcall availability in RuntimeLibcallsInfo
  // NOTE(review): the `if (SafeStackUsePointerAddress) {` branch header and
  // the start of this initializer appear truncated in this excerpt (note the
  // otherwise-unmatched `} else {` below) — confirm against the full source.
  StringRef SafestackPointerAddressName =
      RTLIB::impl___safestack_pointer_address);

    FunctionCallee Fn = F.getParent()->getOrInsertFunction(
        SafestackPointerAddressName, IRB.getPtrTy(0));
    UnsafeStackPtr = IRB.CreateCall(Fn);
  } else {
    UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB, Libcalls);
    if (!UnsafeStackPtr) {
      F.getContext().emitError(
          "no location available for safestack pointer address");
      UnsafeStackPtr = PoisonValue::get(StackPtrTy);
    }
  }

  // Load the current stack pointer (we'll also use it as a base pointer).
  // FIXME: use a dedicated register for it ?
  Instruction *BasePointer =
      IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
  assert(BasePointer->getType() == StackPtrTy);

  AllocaInst *StackGuardSlot = nullptr;
  // FIXME: implement weaker forms of stack protector.
  if (F.hasFnAttribute(Attribute::StackProtect) ||
      F.hasFnAttribute(Attribute::StackProtectStrong) ||
      F.hasFnAttribute(Attribute::StackProtectReq)) {
    // Stash the canary in an unsafe-stack slot and verify it before every
    // return.
    Value *StackGuard = getStackGuard(IRB, F);
    StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
    IRB.CreateStore(StackGuard, StackGuardSlot);

    for (Instruction *RI : Returns) {
      IRBuilder<> IRBRet(RI);
      checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
    }
  }

  // The top of the unsafe stack after all unsafe static allocas are
  // allocated.
  Value *StaticTop = moveStaticAllocasToUnsafeStack(
      IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);

  // Safe stack object that stores the current unsafe stack top. It is updated
  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
  // This is only needed if we need to restore stack pointer after longjmp
  // or exceptions, and we have dynamic allocations.
  // FIXME: a better alternative might be to store the unsafe stack pointer
  // before setjmp / invoke instructions.
  AllocaInst *DynamicTop = createStackRestorePoints(
      IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());

  // Handle dynamic allocas.
  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
                                  DynamicAllocas);

  // Restore the unsafe stack pointer before each return.
  for (Instruction *RI : Returns) {
    IRB.SetInsertPoint(RI);
    IRB.CreateStore(BasePointer, UnsafeStackPtr);
  }

  TryInlinePointerAddress();

  LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
  return true;
}
853
/// Legacy pass-manager wrapper around the SafeStack transformation.
class SafeStackLegacyPass : public FunctionPass {
  const TargetMachine *TM = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid..

  SafeStackLegacyPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LibcallLoweringInfoWrapper>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");

    // Only functions carrying the safestack attribute are instrumented.
    if (!F.hasFnAttribute(Attribute::SafeStack)) {
      LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
                           " for this function\n");
      return false;
    }

    if (F.isDeclaration()) {
      LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
                           " is not available\n");
      return false;
    }

    TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
    auto *TL = Subtarget->getTargetLowering();
    if (!TL)
      report_fatal_error("TargetLowering instance is required");

    const LibcallLoweringInfo &Libcalls =
        getAnalysis<LibcallLoweringInfoWrapper>().getLibcallLowering(
            *F.getParent(), *Subtarget);

    auto *DL = &F.getDataLayout();
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    // Compute DT and LI only for functions that have the attribute.
    // This is only useful because the legacy pass manager doesn't let us
    // compute analyses lazily.

    DominatorTree *DT;
    bool ShouldPreserveDominatorTree;
    std::optional<DominatorTree> LazilyComputedDomTree;

    // Do we already have a DominatorTree available from the previous pass?
    // Note that we should *NOT* require it, to avoid the case where we end up
    // not needing it, but the legacy PM would have computed it for us anyways.
    if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DT = &DTWP->getDomTree();
      ShouldPreserveDominatorTree = true;
    } else {
      // Otherwise, we need to compute it.
      LazilyComputedDomTree.emplace(F);
      DT = &*LazilyComputedDomTree;
      ShouldPreserveDominatorTree = false;
    }

    // Likewise, lazily compute loop info.
    LoopInfo LI(*DT);

    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

    ScalarEvolution SE(F, TLI, ACT, *DT, LI);

    // Only pass the updater along when the tree is cached (and must therefore
    // be kept up to date for subsequent passes).
    return SafeStack(F, *TL, Libcalls, *DL,
                     ShouldPreserveDominatorTree ? &DTU : nullptr, SE)
        .run();
  }
};
932
933} // end anonymous namespace
934
937 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
938
939 if (!F.hasFnAttribute(Attribute::SafeStack)) {
940 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
941 " for this function\n");
942 return PreservedAnalyses::all();
943 }
944
945 if (F.isDeclaration()) {
946 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
947 " is not available\n");
948 return PreservedAnalyses::all();
949 }
950
951 const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
952 auto *TL = Subtarget->getTargetLowering();
953
954 auto &DL = F.getDataLayout();
955
956 // preserve DominatorTree
957 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
958 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
959
960 auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
961 const LibcallLoweringModuleAnalysisResult *LibcallLowering =
962 MAMProxy.getCachedResult<LibcallLoweringModuleAnalysis>(*F.getParent());
963
964 if (!LibcallLowering) {
965 F.getContext().emitError("'" + LibcallLoweringModuleAnalysis::name() +
966 "' analysis required");
967 return PreservedAnalyses::all();
968 }
969
970 const LibcallLoweringInfo &Libcalls =
971 LibcallLowering->getLibcallLowering(*Subtarget);
972
973 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
974
975 bool Changed = SafeStack(F, *TL, Libcalls, DL, &DTU, SE).run();
976
977 if (!Changed)
978 return PreservedAnalyses::all();
981 return PA;
982}
983
char SafeStackLegacyPass::ID = 0;

// Legacy pass registration.
// NOTE(review): the INITIALIZE_PASS_BEGIN line and the
// INITIALIZE_PASS_DEPENDENCY lines appear to be missing from this excerpt —
// confirm against the full source.
                    "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)

// Factory used by TargetPassConfig to create the legacy pass instance.
FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool runOnFunction(Function &F, bool PostInlining)
#define DEBUG_TYPE
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static cl::opt< bool > SafeStackUsePointerAddress("safestack-use-pointer-address", cl::init(false), cl::Hidden)
Use __safestack_pointer_address even if the platform has a faster way of access safe stack pointer.
static cl::opt< bool > ClColoring("safe-stack-coloring", cl::desc("enable safe stack coloring"), cl::Hidden, cl::init(true))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static Value * getStackGuard(const TargetLoweringBase &TLI, const LibcallLoweringInfo &Libcalls, Module *M, IRBuilder<> &B, bool *SupportsSelectionDAGSP=nullptr)
Create a stack guard loading and populate whether SelectionDAG SSP is supported.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
an instruction to allocate memory on the stack
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static BranchProbability getBranchProbStackProtector(bool IsLikely)
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isNoInline() const
Return true if the call should not be inlined.
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
This class represents a function call, abstracting a target machine's calling convention.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1837
LLVM_ABI Value * CreateAllocationSize(Type *DestTy, AllocaInst *AI)
Get allocation size of an alloca as a runtime Value* (handles both static and dynamic allocas and vsc...
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:686
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2223
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2025
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2312
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition IRBuilder.h:1788
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2176
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1854
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1867
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2487
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition IRBuilder.h:604
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:599
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2787
bool isSuccess() const
Definition InlineCost.h:190
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Tracks which library functions to use for a particular subtarget.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
Record a mapping from subtarget to LibcallLoweringInfo.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1529
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
virtual void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
Returns the target-specific address of the unsafe stack pointer.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
Target-Independent Code Generator Pass Configuration Options.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetLowering * getTargetLowering() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
use_iterator use_begin()
Definition Value.h:364
bool use_empty() const
Definition Value.h:346
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
Changed
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This function inlines the called function into the basic block of the caller.
LLVM_ABI FunctionPass * createSafeStackPass()
This pass splits the stack into a safe stack and an unsafe stack to protect against stack-based overf...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
LLVM_ABI InlineResult isInlineViable(Function &Callee)
Check if it is mechanically possible to inline the function Callee, based on the contents of the func...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
LLVM_ABI void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple dbg.value records when the alloca it describes is replaced with a new value.
Definition Local.cpp:2000
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1960
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
static constexpr Align Constant()
Allow constructions of constexpr Align.
Definition Alignment.h:88
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.