LLVM 17.0.0git
StackProtector.cpp
//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//
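
// For orientation, the transformation this pass performs looks roughly like
// the following at the IR level. This is a simplified sketch (not part of the
// upstream source); the exact IR emitted is shown in the comments of
// CreatePrologue() and InsertStackProtectors() below.
//
//   entry:
//     %StackGuardSlot = alloca ptr
//     %StackGuard = <load of the target's stack guard value>
//     call void @llvm.stackprotector(ptr %StackGuard, ptr %StackGuardSlot)
//     ...
//   return:
//     <reload both copies of the guard, compare them, and branch to a block
//      that calls __stack_chk_fail() if they differ>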

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);
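
// Note: the two options above are hidden cl::opts. For experimentation they
// can still be set explicitly, e.g. on the llc command line or via -mllvm
// when driving clang (illustrative invocations, not part of the upstream
// source):
//   llc -enable-selectiondag-sp=false foo.ll
//   clang -fstack-protector-strong -mllvm -disable-check-noreturn-call foo.c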

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
  HasPrologue = false;
  HasIRCheck = false;

  SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);
  if (!requiresStackProtector(F, &Layout))
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors();
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  DTU.reset();
  return Changed;
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
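///
/// For example (an illustrative note, not upstream text): with the default
/// ssp-buffer-size of 8,
///   struct S { int Len; char Buf[16]; };  // large char array: protectable,
///                                         // sets IsLarge
///   struct T { int A[1]; };               // small non-char array: only
///                                         // protectable in strong mode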
static bool ContainsProtectableArray(Type *Ty, Module *M, unsigned SSPBufferSize,
                                     bool &IsLarge, bool Strong,
                                     bool InStruct) {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Triple(M->getTargetTriple()).isOSDarwin()))
        return false;
    }

    // If an array has SSPBufferSize or more bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, M, SSPBufferSize, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

/// Check whether a stack allocation has its address taken.
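///
/// For example (illustrative only, not upstream text), given "%a = alloca i32":
///   store ptr %a, ptr @G       ; the address escapes      -> returns true
///   call void @use(ptr %a)     ; non-intrinsic call       -> returns true
///   %v = load i32, ptr %a      ; in-bounds load from %a   -> not by itself
/// Accesses that provably stay within the first AllocSize bytes do not by
/// themselves cause the address to be considered taken.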
static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
                            Module *M,
                            SmallPtrSet<const PHINode *, 16> &VisitedPHIs) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize,
                             TypeSize::getFixed(MemLoc->Size.getValue())))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::Fixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize, M, VisitedPHIs))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize, M, VisitedPHIs))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be an integer; so if a pointer is being
      // stored, we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic adds a guard variable to functions that call alloca
/// with either a variable size or a size >= SSPBufferSize, to functions with
/// character buffers larger than SSPBufferSize, and to functions with
/// aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic adds a guard variable to functions that call alloca
/// regardless of size, to functions with any buffer regardless of type and
/// size, to functions with aggregates that contain any buffer regardless of
/// type and size, and to functions that contain stack-based variables that
/// have had their address taken.
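///
/// For example (an illustrative summary, not upstream text), with the default
/// ssp-buffer-size of 8:
///   void f() { char Buf[64]; ... }             // ssp and sspstrong
///   void g() { char Buf[4]; ... }              // sspstrong only (small buffer)
///   void h(int N) { char *P = alloca(N); ... } // both (variable-size alloca)
///   void k() { int X; use(&X); ... }           // sspstrong only (address taken)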
bool StackProtector::requiresStackProtector(Function *F, SSPLayoutMap *Layout) {
  Module *M = F->getParent();
  bool Strong = false;
  bool NeedsProtector = false;

  // The set of PHI nodes visited when determining if a variable's reference has
  // been taken. This set is maintained to ensure we don't visit the same PHI
  // node multiple times.
  SmallPtrSet<const PHINode *, 16> VisitedPHIs;

  unsigned SSPBufferSize = F->getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    if (!Layout)
      return true;
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            if (!Layout)
              return true;
            Layout->insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), M, SSPBufferSize,
                                     IsLarge, Strong, false)) {
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong &&
            HasAddressTaken(
                AI, M->getDataLayout().getTypeAllocSize(AI->getAllocatedType()),
                M, VisitedPHIs)) {
          ++NumAddrTaken;
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a load of the stack guard value and report whether SelectionDAG SSP
/// is supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat awkward, since we optionally report here whether a
  // SelectionDAG stack protector should be used. The reason is that the answer
  // is strictly defined as !TLI->getIRStackGuard(B), and getIRStackGuard may
  // mutate the IR. There is no way to get this bit without mutating the IR, so
  // it has to be computed at exactly this point.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // when it conveys the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = Type::getInt8PtrTy(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
/// - The prologue code loads and stores the stack guard onto the stack.
/// - The epilogue checks the value stored in the prologue against the original
///   value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is a stack-protector auto-generated check BB; skip it.
    if (&BB == FailBB)
      continue;
    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do stack check before noreturn calls that aren't nounwind (e.g.
          // __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call is either directly before the
    // return or with a single correct bitcast of the return value in between,
    // so we don't need to worry about many situations here.
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BB into one including
      // fail BB generated by the stack protector pseudo instruction.
      if (!FailBB)
        FailBB = CreateFailBB();

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights,
                                DTU ? &*DTU : nullptr,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);

      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // Return whether we modified any basic blocks; if we did not, there were no
  // return statements or qualifying noreturn calls in the function.
  return HasPrologue;
}

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
  FunctionCallee StackChkFail;
  SmallVector<Value *, 1> Args;
  if (Trip.isOSOpenBSD()) {
    StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
                                          Type::getVoidTy(Context),
                                          Type::getInt8PtrTy(Context));
    Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
  }
  cast<Function>(StackChkFail.getCallee())->addFnAttr(Attribute::NoReturn);
  B.CreateCall(StackChkFail, Args);
  B.CreateUnreachable();
  return FailBB;
}

bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}