1//====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// Provide a pass which mitigates speculative execution attacks which operate
11/// by speculating incorrectly past some predicate (a type check, bounds check,
12/// or other condition) to reach a load with invalid inputs and leak the data
13/// accessed by that load using a side channel out of the speculative domain.
14///
15/// For details on the attacks, see the first variant in both the Project Zero
16/// writeup and the Spectre paper:
17/// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
18/// https://spectreattack.com/spectre.pdf
19///
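///
/// As a rough illustration (not code from this pass), the kind of gadget this
/// mitigation targets looks like the following, where `idx`, `array1_size`,
/// `array1`, and `array2` are hypothetical names:
///
///   if (idx < array1_size)              // bounds check that may be mispredicted
///     tmp = array2[array1[idx] * 512];  // dependent load leaks array1[idx]
///
/// The pass threads a "predicate state" value along each conditional edge and
/// uses it to poison the inputs of such loads when the CPU has misspeculated.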
20//===----------------------------------------------------------------------===//
21
22#include "X86.h"
23#include "X86InstrInfo.h"
24#include "X86Subtarget.h"
25#include "llvm/ADT/ArrayRef.h"
26#include "llvm/ADT/DenseMap.h"
27#include "llvm/ADT/STLExtras.h"
28#include "llvm/ADT/SmallPtrSet.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/ADT/SparseBitVector.h"
32#include "llvm/ADT/Statistic.h"
47#include "llvm/IR/DebugLoc.h"
48#include "llvm/MC/MCSchedule.h"
49#include "llvm/Pass.h"
50#include "llvm/Support/CommandLine.h"
51#include "llvm/Support/Debug.h"
54#include <cassert>
55#include <iterator>
56#include <optional>
57
58using namespace llvm;
59
60#define PASS_KEY "x86-slh"
61#define DEBUG_TYPE PASS_KEY
62
63STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
64STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
65STATISTIC(NumAddrRegsHardened,
66 "Number of address mode used registers hardaned");
67STATISTIC(NumPostLoadRegsHardened,
68 "Number of post-load register values hardened");
69STATISTIC(NumCallsOrJumpsHardened,
70 "Number of calls or jumps requiring extra hardening");
71STATISTIC(NumInstsInserted, "Number of instructions inserted");
72STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
73
75 "x86-speculative-load-hardening",
76 cl::desc("Force enable speculative load hardening"), cl::init(false),
78
79static cl::opt<bool> HardenEdgesWithLFENCE(
80 PASS_KEY "-lfence",
81 cl::desc(
82 "Use LFENCE along each conditional edge to harden against speculative "
83 "loads rather than conditional movs and poisoned pointers."),
84 cl::init(false), cl::Hidden);
85
86static cl::opt<bool> EnablePostLoadHardening(
87 PASS_KEY "-post-load",
88 cl::desc("Harden the value loaded *after* it is loaded by "
89 "flushing the loaded bits to 1. This is hard to do "
90 "in general but can be done easily for GPRs."),
91 cl::init(true), cl::Hidden);
92
93static cl::opt<bool> FenceCallAndRet(
94 PASS_KEY "-fence-call-and-ret",
95 cl::desc("Use a full speculation fence to harden both call and ret edges "
96 "rather than a lighter weight mitigation."),
97 cl::init(false), cl::Hidden);
98
99static cl::opt<bool> HardenInterprocedurally(
100 PASS_KEY "-ip",
101 cl::desc("Harden interprocedurally by passing our state in and out of "
102 "functions in the high bits of the stack pointer."),
103 cl::init(true), cl::Hidden);
104
105static cl::opt<bool>
106 HardenLoads(PASS_KEY "-loads",
107 cl::desc("Sanitize loads from memory. When disabled, no "
108 "significant security is provided."),
109 cl::init(true), cl::Hidden);
110
111static cl::opt<bool> HardenIndirectCallsAndJumps(
112 PASS_KEY "-indirect",
113 cl::desc("Harden indirect calls and jumps against using speculatively "
114 "stored attacker controlled addresses. This is designed to "
115 "mitigate Spectre v1.2 style attacks."),
116 cl::init(true), cl::Hidden);
117
118namespace {
119
120constexpr StringRef X86SLHPassName = "X86 speculative load hardening";
121
122class X86SpeculativeLoadHardeningLegacy : public MachineFunctionPass {
123public:
124 X86SpeculativeLoadHardeningLegacy() : MachineFunctionPass(ID) {}
125
126 StringRef getPassName() const override { return X86SLHPassName; }
127 bool runOnMachineFunction(MachineFunction &MF) override;
128 void getAnalysisUsage(AnalysisUsage &AU) const override;
129
130 /// Pass identification, replacement for typeid.
131 static char ID;
132};
133
134class X86SpeculativeLoadHardeningImpl {
135public:
136 X86SpeculativeLoadHardeningImpl() = default;
137
138 bool run(MachineFunction &MF);
139
140private:
141 /// The information about a block's conditional terminators needed to trace
142 /// our predicate state through the exiting edges.
143 struct BlockCondInfo {
144 MachineBasicBlock *MBB;
145
146 // We mostly have one conditional branch, and in extremely rare cases have
147 // two. Three and more are so rare as to be unimportant for compile time.
148 SmallVector<MachineInstr *, 2> CondBrs;
149
150 MachineInstr *UncondBr;
151 };
152
153 /// Manages the predicate state traced through the program.
154 struct PredState {
155 Register InitialReg;
156 Register PoisonReg;
157
158 const TargetRegisterClass *RC;
159 MachineSSAUpdater SSA;
160
161 PredState(MachineFunction &MF, const TargetRegisterClass *RC)
162 : RC(RC), SSA(MF) {}
163 };
164
165 const X86Subtarget *Subtarget = nullptr;
166 MachineRegisterInfo *MRI = nullptr;
167 const X86InstrInfo *TII = nullptr;
168 const TargetRegisterInfo *TRI = nullptr;
169
170 std::optional<PredState> PS;
171
172 void hardenEdgesWithLFENCE(MachineFunction &MF);
173
174 SmallVector<BlockCondInfo, 16> collectBlockCondInfo(MachineFunction &MF);
175
176 SmallVector<MachineInstr *, 16>
177 tracePredStateThroughCFG(MachineFunction &MF, ArrayRef<BlockCondInfo> Infos);
178
179 void unfoldCallAndJumpLoads(MachineFunction &MF);
180
181 SmallVector<MachineInstr *, 16>
182 tracePredStateThroughIndirectBranches(MachineFunction &MF);
183
184 void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);
185
186 Register saveEFLAGS(MachineBasicBlock &MBB,
187 MachineBasicBlock::iterator InsertPt,
188 const DebugLoc &Loc);
189 void restoreEFLAGS(MachineBasicBlock &MBB,
190 MachineBasicBlock::iterator InsertPt, const DebugLoc &Loc,
191 Register Reg);
192
193 void mergePredStateIntoSP(MachineBasicBlock &MBB,
194 MachineBasicBlock::iterator InsertPt,
195 const DebugLoc &Loc, Register PredStateReg);
196 Register extractPredStateFromSP(MachineBasicBlock &MBB,
197 MachineBasicBlock::iterator InsertPt,
198 const DebugLoc &Loc);
199
200 void
201 hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
202 MachineOperand &IndexMO,
203 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
204 MachineInstr *
205 sinkPostLoadHardenedInst(MachineInstr &MI,
206 SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
207 bool canHardenRegister(Register Reg);
208 Register hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
209 MachineBasicBlock::iterator InsertPt,
210 const DebugLoc &Loc);
211 Register hardenPostLoad(MachineInstr &MI);
212 void hardenReturnInstr(MachineInstr &MI);
213 void tracePredStateThroughCall(MachineInstr &MI);
214 void hardenIndirectCallOrJumpInstr(
215 MachineInstr &MI,
216 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
217};
218
219} // end anonymous namespace
220
221bool X86SpeculativeLoadHardeningLegacy::runOnMachineFunction(
222 MachineFunction &MF) {
223 X86SpeculativeLoadHardeningImpl Impl;
224 bool Changed = Impl.run(MF);
225 LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
226 dbgs() << "\n"; MF.verify(this));
227 return Changed;
228}
229
230char X86SpeculativeLoadHardeningLegacy::ID = 0;
231
232void X86SpeculativeLoadHardeningLegacy::getAnalysisUsage(
233 AnalysisUsage &AU) const {
234 MachineFunctionPass::getAnalysisUsage(AU);
235}
236
237static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
238 MachineBasicBlock &Succ, int SuccCount,
239 MachineInstr *Br, MachineInstr *&UncondBr,
240 const X86InstrInfo &TII) {
241 assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");
242
243 MachineFunction &MF = *MBB.getParent();
244
245 MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();
246
247 // We have to insert the new block immediately after the current one as we
248 // don't know what layout-successor relationships the successor has and we
249 // may not be able to (and generally don't want to) try to fix those up.
250 MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);
251
252 // Update the branch instruction if necessary.
253 if (Br) {
254 assert(Br->getOperand(0).getMBB() == &Succ &&
255 "Didn't start with the right target!");
256 Br->getOperand(0).setMBB(&NewMBB);
257
258 // If this successor was reached through a branch rather than fallthrough,
259 // we might have *broken* fallthrough and so need to inject a new
260 // unconditional branch.
261 if (!UncondBr) {
262 MachineBasicBlock &OldLayoutSucc =
263 *std::next(MachineFunction::iterator(&NewMBB));
264 assert(MBB.isSuccessor(&OldLayoutSucc) &&
265 "Without an unconditional branch, the old layout successor should "
266 "be an actual successor!");
267 auto BrBuilder =
268 BuildMI(&MBB, DebugLoc(), TII.get(X86::JMP_1)).addMBB(&OldLayoutSucc);
269 // Update the unconditional branch now that we've added one.
270 UncondBr = &*BrBuilder;
271 }
272
273 // Insert unconditional "jump Succ" instruction in the new block if
274 // necessary.
275 if (!NewMBB.isLayoutSuccessor(&Succ)) {
276 SmallVector<MachineOperand, 4> Cond;
277 TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
278 }
279 } else {
280 assert(!UncondBr &&
281 "Cannot have a branchless successor and an unconditional branch!");
282 assert(NewMBB.isLayoutSuccessor(&Succ) &&
283 "A non-branch successor must have been a layout successor before "
284 "and now is a layout successor of the new block.");
285 }
286
287 // If this is the only edge to the successor, we can just replace it in the
288 // CFG. Otherwise we need to add a new entry in the CFG for the new
289 // successor.
290 if (SuccCount == 1) {
291 MBB.replaceSuccessor(&Succ, &NewMBB);
292 } else {
293 MBB.splitSuccessor(&Succ, &NewMBB);
294 }
295
296 // Hook up the edge from the new basic block to the old successor in the CFG.
297 NewMBB.addSuccessor(&Succ);
298
299 // Fix PHI nodes in Succ so they refer to NewMBB instead of MBB.
300 for (MachineInstr &MI : Succ) {
301 if (!MI.isPHI())
302 break;
303 for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
304 OpIdx += 2) {
305 MachineOperand &OpV = MI.getOperand(OpIdx);
306 MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
307 assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
308 if (OpMBB.getMBB() != &MBB)
309 continue;
310
311 // If this is the last edge to the successor, just replace MBB in the PHI.
312 if (SuccCount == 1) {
313 OpMBB.setMBB(&NewMBB);
314 break;
315 }
316
317 // Otherwise, append a new pair of operands for the new incoming edge.
318 MI.addOperand(MF, OpV);
319 MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
320 break;
321 }
322 }
323
324 // Inherit live-ins from the successor
325 for (auto &LI : Succ.liveins())
326 NewMBB.addLiveIn(LI);
327
328 LLVM_DEBUG(dbgs() << " Split edge from '" << MBB.getName() << "' to '"
329 << Succ.getName() << "'.\n");
330 return NewMBB;
331}
332
333/// Remove duplicate PHI operands to leave the PHI in a canonical and
334/// predictable form.
335///
336/// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
337/// isn't what you might expect. We may have multiple entries in PHI nodes for
338/// a single predecessor. This makes CFG-updating extremely complex, so here we
339/// simplify all PHI nodes to a model even simpler than the IR's model: exactly
340/// one entry per predecessor, regardless of how many edges there are.
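///
/// For example (a hand-written sketch, not from an actual test), a PHI that
/// names the same predecessor twice, such as
///   %v:gr64 = PHI %a, %bb.1, %b, %bb.2, %c, %bb.2
/// is rewritten to carry exactly one entry for %bb.2.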
341static void canonicalizePHIOperands(MachineFunction &MF) {
342 SmallPtrSet<MachineBasicBlock *, 4> Preds;
343 SmallVector<int, 4> DupIndices;
344 for (auto &MBB : MF)
345 for (auto &MI : MBB) {
346 if (!MI.isPHI())
347 break;
348
349 // First we scan the operands of the PHI looking for duplicate entries for
350 // a particular predecessor. We retain the operand index of each duplicate
351 // entry found.
352 for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
353 OpIdx += 2)
354 if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
355 DupIndices.push_back(OpIdx);
356
357 // Now walk the duplicate indices, removing both the block and value. Note
358 // that these are stored as a vector, making this element-wise removal
359 // potentially quadratic.
361 //
362 // FIXME: It is really frustrating that we have to use a quadratic
363 // removal algorithm here. There should be a better way, but the use-def
364 // updates required make that impossible using the public API.
365 //
366 // Note that we have to process these backwards so that we don't
367 // invalidate other indices with each removal.
368 while (!DupIndices.empty()) {
369 int OpIdx = DupIndices.pop_back_val();
370 // Remove both the block and value operand, again in reverse order to
371 // preserve indices.
372 MI.removeOperand(OpIdx + 1);
373 MI.removeOperand(OpIdx);
374 }
375
376 Preds.clear();
377 }
378}
379
380/// Helper to scan a function for loads vulnerable to misspeculation that we
381/// want to harden.
382///
383/// We use this to avoid making changes to functions where there is nothing we
384/// need to do to harden against misspeculation.
385static bool hasVulnerableLoad(MachineFunction &MF) {
386 for (MachineBasicBlock &MBB : MF) {
387 for (MachineInstr &MI : MBB) {
388 // Loads within this basic block after an LFENCE are not at risk of
389 // speculatively executing with invalid predicates from prior control
390 // flow. So break out of this block but continue scanning the function.
391 if (MI.getOpcode() == X86::LFENCE)
392 break;
393
394 // Looking for loads only.
395 if (!MI.mayLoad())
396 continue;
397
398 // An MFENCE is modeled as a load but isn't vulnerable to misspeculation.
399 if (MI.getOpcode() == X86::MFENCE)
400 continue;
401
402 // We found a load.
403 return true;
404 }
405 }
406
407 // No loads found.
408 return false;
409}
410
411bool X86SpeculativeLoadHardeningImpl::run(MachineFunction &MF) {
412 LLVM_DEBUG(dbgs() << "********** " << X86SLHPassName << " : " << MF.getName()
413 << " **********\n");
414
415 // Only run if this pass is force-enabled or we detect the relevant function
416 // attribute requesting SLH.
417 if (!EnableSpeculativeLoadHardening &&
418 !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
419 return false;
420
421 Subtarget = &MF.getSubtarget<X86Subtarget>();
422 MRI = &MF.getRegInfo();
423 TII = Subtarget->getInstrInfo();
424 TRI = Subtarget->getRegisterInfo();
425
426 // FIXME: Support for 32-bit.
427 PS.emplace(MF, &X86::GR64_NOSPRegClass);
428
429 if (MF.begin() == MF.end())
430 // Nothing to do for a degenerate empty function...
431 return false;
432
433 // We support an alternative hardening technique based on a debug flag.
434 if (HardenEdgesWithLFENCE) {
435 hardenEdgesWithLFENCE(MF);
436 return true;
437 }
438
439 // Create a dummy debug loc to use for all the generated code here.
440 DebugLoc Loc;
441
442 MachineBasicBlock &Entry = *MF.begin();
443 auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());
444
445 // Do a quick scan to see if we have any checkable loads.
446 bool HasVulnerableLoad = hasVulnerableLoad(MF);
447
448 // See if we have any conditional branching blocks that we will need to trace
449 // predicate state through.
450 SmallVector<BlockCondInfo, 16> Infos = collectBlockCondInfo(MF);
451
452 // If we have no interesting conditions or loads, nothing to do here.
453 if (!HasVulnerableLoad && Infos.empty())
454 return true;
455
456 // The poison value is required to be an all-ones value for many aspects of
457 // this mitigation.
458 const int PoisonVal = -1;
459 PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
460 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
461 .addImm(PoisonVal);
462 ++NumInstsInserted;
463
464 // If we have loads being hardened and we've asked for call and ret edges to
465 // get a full fence-based mitigation, inject that fence.
466 if (HasVulnerableLoad && FenceCallAndRet) {
467 // We need to insert an LFENCE at the start of the function to suspend any
468 // incoming misspeculation from the caller. This helps two-fold: the caller
469 // may not have been protected as this code has been, and this code gets to
470 // not take any specific action to protect across calls.
471 // FIXME: We could skip this for functions which unconditionally return
472 // a constant.
473 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
474 ++NumInstsInserted;
475 ++NumLFENCEsInserted;
476 }
477
478 // If we guarded the entry with an LFENCE and have no conditionals to protect
479 // in blocks, then we're done.
480 if (FenceCallAndRet && Infos.empty())
481 // We may have changed the function's code at this point to insert fences.
482 return true;
483
484 // Establish the initial predicate state in the entry block.
485 if (HardenInterprocedurally && !FenceCallAndRet) {
486 // Set up the predicate state by extracting it from the incoming stack
487 // pointer so we pick up any misspeculation in our caller.
488 PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
489 } else {
490 // Otherwise, just build the predicate state itself by zeroing a register
491 // as we don't need any initial state.
492 PS->InitialReg = MRI->createVirtualRegister(PS->RC);
493 Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
494 auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
495 PredStateSubReg);
496 ++NumInstsInserted;
497 MachineOperand *ZeroEFLAGSDefOp =
498 ZeroI->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
499 assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
500 "Must have an implicit def of EFLAGS!");
501 ZeroEFLAGSDefOp->setIsDead(true);
502 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
503 PS->InitialReg)
504 .addReg(PredStateSubReg)
505 .addImm(X86::sub_32bit);
506 }
507
508 // We're going to need to trace predicate state throughout the function's
509 // CFG. Prepare for this by setting up our initial state of PHIs with unique
510 // predecessor entries and all the initial predicate state.
511 canonicalizePHIOperands(MF);
512
513 // Track the updated values in an SSA updater to rewrite into SSA form at the
514 // end.
515 PS->SSA.Initialize(PS->InitialReg);
516 PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
517
518 // Trace through the CFG.
519 auto CMovs = tracePredStateThroughCFG(MF, Infos);
520
521 // We may also enter basic blocks in this function via exception handling
522 // control flow. Here, if we are hardening interprocedurally, we need to
523 // re-capture the predicate state from the throwing code. In the Itanium ABI,
524 // the throw will always look like a call to __cxa_throw and will have the
525 // predicate state in the stack pointer, so extract fresh predicate state from
526 // the stack pointer and make it available in SSA.
527 // FIXME: Handle non-Itanium ABI EH models.
528 if (HardenInterprocedurally) {
529 for (MachineBasicBlock &MBB : MF) {
530 assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
531 assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
532 assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");
533 if (!MBB.isEHPad())
534 continue;
535 PS->SSA.AddAvailableValue(
536 &MBB,
537 extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));
538 }
539 }
540
541 if (HardenIndirectCallsAndJumps) {
542 // If we are going to harden calls and jumps we need to unfold their memory
543 // operands.
544 unfoldCallAndJumpLoads(MF);
545
546 // Then we trace predicate state through the indirect branches.
547 auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
548 CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());
549 }
550
551 // Now that we have the predicate state available at the start of each block
552 // in the CFG, trace it through each block, hardening vulnerable instructions
553 // as we go.
554 tracePredStateThroughBlocksAndHarden(MF);
555
556 // Now rewrite all the uses of the pred state using the SSA updater to insert
557 // PHIs connecting the state between blocks along the CFG edges.
558 for (MachineInstr *CMovI : CMovs)
559 for (MachineOperand &Op : CMovI->operands()) {
560 if (!Op.isReg() || Op.getReg() != PS->InitialReg)
561 continue;
562
563 PS->SSA.RewriteUse(Op);
564 }
565
566 return true;
567}
568
569/// Implements the naive hardening approach of putting an LFENCE after every
570/// potentially mis-predicted control flow construct.
571///
572/// We include this as an alternative mostly for the purpose of comparison. The
573/// performance impact of this is expected to be extremely severe and not
574/// practical for any real-world users.
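///
/// For instance (illustrative only), a block ending in `jne %bb.2` that falls
/// through to %bb.1 gets an `lfence` inserted at the top of both %bb.1 and
/// %bb.2, so no load in either successor can issue before the branch resolves.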
575void X86SpeculativeLoadHardeningImpl::hardenEdgesWithLFENCE(
576 MachineFunction &MF) {
577 // First, we scan the function looking for blocks that are reached along edges
578 // that we might want to harden.
579 SmallSetVector<MachineBasicBlock *, 8> Blocks;
580 for (MachineBasicBlock &MBB : MF) {
581 // If there are no or only one successor, nothing to do here.
582 if (MBB.succ_size() <= 1)
583 continue;
584
585 // Skip blocks unless their terminators start with a branch. Other
586 // terminators don't seem interesting for guarding against misspeculation.
587 auto TermIt = MBB.getFirstTerminator();
588 if (TermIt == MBB.end() || !TermIt->isBranch())
589 continue;
590
591 // Add all the non-EH-pad successors to the blocks we want to harden. We
592 // skip EH pads because there isn't really a condition of interest on
593 // entering.
594 for (MachineBasicBlock *SuccMBB : MBB.successors())
595 if (!SuccMBB->isEHPad())
596 Blocks.insert(SuccMBB);
597 }
598
599 for (MachineBasicBlock *MBB : Blocks) {
600 auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
601 BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
602 ++NumInstsInserted;
603 ++NumLFENCEsInserted;
604 }
605}
606
607SmallVector<X86SpeculativeLoadHardeningImpl::BlockCondInfo, 16>
608X86SpeculativeLoadHardeningImpl::collectBlockCondInfo(MachineFunction &MF) {
609 SmallVector<BlockCondInfo, 16> Infos;
610
611 // Walk the function and build up a summary for each block's conditions that
612 // we need to trace through.
613 for (MachineBasicBlock &MBB : MF) {
614 // If there are no or only one successor, nothing to do here.
615 if (MBB.succ_size() <= 1)
616 continue;
617
618 // We want to reliably handle any conditional branch terminators in the
619 // MBB, so we manually analyze the branch. We can handle all of the
620 // permutations here, including ones that analyzeBranch cannot.
621 //
622 // The approach is to walk backwards across the terminators, resetting at
623 // any unconditional non-indirect branch, and track all conditional edges
624 // to basic blocks as well as the fallthrough or unconditional successor
625 // edge. For each conditional edge, we track the target and the opposite
626 // condition code in order to inject a "no-op" cmov into that successor
627 // that will harden the predicate. For the fallthrough/unconditional
628 // edge, we inject a separate cmov for each conditional branch with
629 // matching condition codes. This effectively implements an "and" of the
630 // condition flags, even if there isn't a single condition flag that would
631 // directly implement that. We don't bother trying to optimize either of
632 // these cases because if such an optimization is possible, LLVM should
633 // have optimized the conditional *branches* in that way already to reduce
634 // instruction count. This late, we simply assume the minimal number of
635 // branch instructions is being emitted and use that to guide our cmov
636 // insertion.
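//
// As a sketch (not from a real test case), a block ending in:
//   jl %bb.2
//   jmp %bb.3
// gets a CMOV guarded by GE (the inverse condition) on the edge to %bb.2 and
// one guarded by L on the edge to %bb.3, each selecting the all-ones poison
// value exactly when its condition says the CPU must have misspeculated to
// reach that successor.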
637
638 BlockCondInfo Info = {&MBB, {}, nullptr};
639
640 // Now walk backwards through the terminators and build up successors they
641 // reach and the conditions.
642 for (MachineInstr &MI : llvm::reverse(MBB)) {
643 // Once we've handled all the terminators, we're done.
644 if (!MI.isTerminator())
645 break;
646
647 // If we see a non-branch terminator, we can't handle anything so bail.
648 if (!MI.isBranch()) {
649 Info.CondBrs.clear();
650 break;
651 }
652
653 // If we see an unconditional branch, reset our state, clear any
654 // fallthrough, and set this is the "else" successor.
655 if (MI.getOpcode() == X86::JMP_1) {
656 Info.CondBrs.clear();
657 Info.UncondBr = &MI;
658 continue;
659 }
660
661 // If we get an invalid condition, we have an indirect branch or some
662 // other unanalyzable "fallthrough" case. We model this as a nullptr for
663 // the destination so we can still guard any conditional successors.
664 // Consider code sequences like:
665 // ```
666 // jCC L1
667 // jmpq *%rax
668 // ```
669 // We still want to harden the edge to `L1`.
670 if (X86::getCondFromBranch(MI) == X86::COND_INVALID) {
671 Info.CondBrs.clear();
672 Info.UncondBr = &MI;
673 continue;
674 }
675
676 // We have a vanilla conditional branch, add it to our list.
677 Info.CondBrs.push_back(&MI);
678 }
679 if (Info.CondBrs.empty()) {
680 ++NumBranchesUntraced;
681 LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
682 MBB.dump());
683 continue;
684 }
685
686 Infos.push_back(Info);
687 }
688
689 return Infos;
690}
691
692/// Trace the predicate state through the CFG, instrumenting each conditional
693/// branch such that misspeculation through an edge will poison the predicate
694/// state.
695///
696/// Returns the list of inserted CMov instructions so that they can have their
697/// uses of the predicate state rewritten into proper SSA form once it is
698/// complete.
699SmallVector<MachineInstr *, 16>
700X86SpeculativeLoadHardeningImpl::tracePredStateThroughCFG(
701 MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
702 // Collect the inserted cmov instructions so we can rewrite their uses of the
703 // predicate state into SSA form.
704 SmallVector<MachineInstr *, 16> CMovs;
705
706 // Now walk all of the basic blocks looking for ones that end in conditional
707 // jumps where we need to update this register along each edge.
708 for (const BlockCondInfo &Info : Infos) {
709 MachineBasicBlock &MBB = *Info.MBB;
710 const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
711 MachineInstr *UncondBr = Info.UncondBr;
712
713 LLVM_DEBUG(dbgs() << "Tracing predicate through block: " << MBB.getName()
714 << "\n");
715 ++NumCondBranchesTraced;
716
717 // Compute the non-conditional successor as either the target of any
718 // unconditional branch or the layout successor.
719 MachineBasicBlock *UncondSucc =
720 UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
721 ? UncondBr->getOperand(0).getMBB()
722 : nullptr)
723 : &*std::next(MachineFunction::iterator(&MBB));
724
725 // Count how many edges there are to any given successor.
726 SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
727 if (UncondSucc)
728 ++SuccCounts[UncondSucc];
729 for (auto *CondBr : CondBrs)
730 ++SuccCounts[CondBr->getOperand(0).getMBB()];
731
732 // A lambda to insert cmov instructions into a block checking all of the
733 // condition codes in a sequence.
734 auto BuildCheckingBlockForSuccAndConds =
735 [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
736 MachineInstr *Br, MachineInstr *&UncondBr,
737 ArrayRef<X86::CondCode> Conds) {
738 // First, we split the edge to insert the checking block into a safe
739 // location.
740 auto &CheckingMBB =
741 (SuccCount == 1 && Succ.pred_size() == 1)
742 ? Succ
743 : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);
744
745 bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
746 if (!LiveEFLAGS)
747 CheckingMBB.addLiveIn(X86::EFLAGS);
748
749 // Now insert the cmovs to implement the checks.
750 auto InsertPt = CheckingMBB.begin();
751 assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
752 "Should never have a PHI in the initial checking block as it "
753 "always has a single predecessor!");
754
755 // We will wire each cmov to each other, but need to start with the
756 // incoming pred state.
757 Register CurStateReg = PS->InitialReg;
758
759 for (X86::CondCode Cond : Conds) {
760 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
761 auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
762
763 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
764 // Note that we intentionally use an empty debug location so that
765 // this picks up the preceding location.
766 auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
767 TII->get(CMovOp), UpdatedStateReg)
768 .addReg(CurStateReg)
769 .addReg(PS->PoisonReg)
770 .addImm(Cond);
771 // If this is the last cmov and the EFLAGS weren't originally
772 // live-in, mark them as killed.
773 if (!LiveEFLAGS && Cond == Conds.back())
774 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)
775 ->setIsKill(true);
776
777 ++NumInstsInserted;
778 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump();
779 dbgs() << "\n");
780
781 // The first one of the cmovs will be using the top level
782 // `PredStateReg` and need to get rewritten into SSA form.
783 if (CurStateReg == PS->InitialReg)
784 CMovs.push_back(&*CMovI);
785
786 // The next cmov should start from this one's def.
787 CurStateReg = UpdatedStateReg;
788 }
789
790 // And put the last one into the available values for SSA form of our
791 // predicate state.
792 PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
793 };
794
795 std::vector<X86::CondCode> UncondCodeSeq;
796 for (auto *CondBr : CondBrs) {
797 MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
798 int &SuccCount = SuccCounts[&Succ];
799
800 X86::CondCode Cond = X86::getCondFromBranch(*CondBr);
801 X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
802 UncondCodeSeq.push_back(Cond);
803
804 BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
805 {InvCond});
806
807 // Decrement the successor count now that we've split one of the edges.
808 // We need to keep the count of edges to the successor accurate in order
809 // to know above when to *replace* the successor in the CFG vs. just
810 // adding the new successor.
811 --SuccCount;
812 }
813
814 // Since we may have split edges and changed the number of successors,
815 // normalize the probabilities. This avoids doing it each time we split an
816 // edge.
817 MBB.normalizeSuccProbs();
818
819 // Finally, we need to insert cmovs into the "fallthrough" edge. Here, we
820 // need to intersect the other condition codes. We can do this by just
821 // doing a cmov for each one.
822 if (!UncondSucc)
823 // If we have no fallthrough to protect (perhaps it is an indirect jump?)
824 // just skip this and continue.
825 continue;
826
827 assert(SuccCounts[UncondSucc] == 1 &&
828 "We should never have more than one edge to the unconditional "
829 "successor at this point because every other edge must have been "
830 "split above!");
831
832 // Sort and unique the codes to minimize them.
833 llvm::sort(UncondCodeSeq);
834 UncondCodeSeq.erase(llvm::unique(UncondCodeSeq), UncondCodeSeq.end());
835
836 // Build a checking version of the successor.
837 BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
838 UncondBr, UncondBr, UncondCodeSeq);
839 }
840
841 return CMovs;
842}
843
844/// Compute the register class for the unfolded load.
845///
846/// FIXME: This should probably live in X86InstrInfo, potentially by adding
847/// a way to unfold into a newly created vreg rather than requiring a register
848/// input.
849static const TargetRegisterClass *
850getRegClassForUnfoldedLoad(const X86InstrInfo &TII, unsigned Opcode) {
851 unsigned Index;
852 unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
853 Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
854 const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
855 return TII.getRegClass(MCID, Index);
856}
857
858void X86SpeculativeLoadHardeningImpl::unfoldCallAndJumpLoads(
859 MachineFunction &MF) {
860 for (MachineBasicBlock &MBB : MF)
861 // We use make_early_inc_range here so we can remove instructions if needed
862 // without disturbing the iteration.
863 for (MachineInstr &MI : llvm::make_early_inc_range(MBB.instrs())) {
864 // Must either be a call or a branch.
865 if (!MI.isCall() && !MI.isBranch())
866 continue;
867 // We only care about loading variants of these instructions.
868 if (!MI.mayLoad())
869 continue;
870
871 switch (MI.getOpcode()) {
872 default: {
874 dbgs() << "ERROR: Found an unexpected loading branch or call "
875 "instruction:\n";
876 MI.dump(); dbgs() << "\n");
877 report_fatal_error("Unexpected loading branch or call!");
878 }
879
880 case X86::FARCALL16m:
881 case X86::FARCALL32m:
882 case X86::FARCALL64m:
883 case X86::FARJMP16m:
884 case X86::FARJMP32m:
885 case X86::FARJMP64m:
886 // We cannot mitigate far jumps or calls, but we also don't expect them
887 // to be vulnerable to Spectre v1.2 style attacks.
888 continue;
889
890 case X86::CALL16m:
891 case X86::CALL16m_NT:
892 case X86::CALL32m:
893 case X86::CALL32m_NT:
894 case X86::CALL64m:
895 case X86::CALL64m_NT:
896 case X86::JMP16m:
897 case X86::JMP16m_NT:
898 case X86::JMP32m:
899 case X86::JMP32m_NT:
900 case X86::JMP64m:
901 case X86::JMP64m_NT:
902 case X86::TAILJMPm64:
903 case X86::TAILJMPm64_REX:
904 case X86::TAILJMPm:
905 case X86::TCRETURNmi64:
906 case X86::TCRETURN_WINmi64:
907 case X86::TCRETURNmi: {
908 // Use the generic unfold logic now that we know we're dealing with
909 // expected instructions.
910 // FIXME: We don't have test coverage for all of these!
911 auto *UnfoldedRC = getRegClassForUnfoldedLoad(*TII, MI.getOpcode());
912 if (!UnfoldedRC) {
914 << "ERROR: Unable to unfold load from instruction:\n";
915 MI.dump(); dbgs() << "\n");
916 report_fatal_error("Unable to unfold load!");
917 }
918 Register Reg = MRI->createVirtualRegister(UnfoldedRC);
919 SmallVector<MachineInstr *, 2> NewMIs;
920 // If we were able to compute an unfolded reg class, any failure here
921 // is just a programming error so just assert.
922 bool Unfolded =
923 TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
924 /*UnfoldStore*/ false, NewMIs);
925 (void)Unfolded;
926 assert(Unfolded &&
927 "Computed unfolded register class but failed to unfold");
928 // Now stitch the new instructions into place and erase the old one.
929 for (auto *NewMI : NewMIs)
930 MBB.insert(MI.getIterator(), NewMI);
931
932 // Update the call info.
933 if (MI.isCandidateForAdditionalCallInfo())
934 MF.eraseAdditionalCallInfo(&MI);
935
936 MI.eraseFromParent();
937 LLVM_DEBUG({
938 dbgs() << "Unfolded load successfully into:\n";
939 for (auto *NewMI : NewMIs) {
940 NewMI->dump();
941 dbgs() << "\n";
942 }
943 });
944 continue;
945 }
946 }
947 llvm_unreachable("Escaped switch with default!");
948 }
949}
950
951/// Trace the predicate state through indirect branches, instrumenting them to
952/// poison the state if a target is reached that does not match the expected
953/// target.
954///
955/// This is designed to mitigate Spectre variant 1 attacks where an indirect
956/// branch is trained to predict a particular target and then mispredicts that
957/// target in a way that can leak data. Despite using an indirect branch, this
958/// is really a variant 1 style attack: it does not steer execution to an
959/// arbitrary or attacker controlled address, and it does not require any
960/// special code executing next to the victim. This attack can also be mitigated
961/// through retpolines, but those require either replacing indirect branches
962/// with conditional direct branches or lowering them through a device that
963/// blocks speculation. This mitigation can replace these retpoline-style
964/// mitigations for jump tables and other indirect branches within a function
965/// when variant 2 isn't a risk while allowing limited speculation. Indirect
966/// calls, however, cannot be mitigated through this technique without changing
967/// the ABI in a fundamental way.
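///
/// Roughly (an illustrative sketch, not the verbatim output), each target
/// block of an indirect `jmpq *%rax` is prefixed with something like:
///
///   leaq .LBB0_3(%rip), %rcx   # this block's own address
///   cmpq %rcx, %rax            # compare against the actual branch target
///   cmovneq %poison, %pred     # poison the predicate state on a mismatch
///
/// so any speculative arrival at the wrong target flips the state to all-ones.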
968SmallVector<MachineInstr *, 16>
969X86SpeculativeLoadHardeningImpl::tracePredStateThroughIndirectBranches(
970 MachineFunction &MF) {
971 // We use the SSAUpdater to insert PHI nodes for the target addresses of
972 // indirect branches. We don't actually need the full power of the SSA updater
973 // in this particular case as we always have immediately available values, but
974 // this avoids us having to re-implement the PHI construction logic.
975 MachineSSAUpdater TargetAddrSSA(MF);
976 TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass));
977
978 // Track which blocks were terminated with an indirect branch.
979 SmallPtrSet<MachineBasicBlock *, 4> IndirectTerminatedMBBs;
980
981 // We need to know what blocks end up reached via indirect branches. We
982 // expect this to be a subset of those whose address is taken and so track it
983 // directly via the CFG.
984 SmallPtrSet<MachineBasicBlock *, 4> IndirectTargetMBBs;
985
986 // Walk all the blocks which end in an indirect branch and make the
987 // target address available.
988 for (MachineBasicBlock &MBB : MF) {
989 // Find the last terminator.
990 auto MII = MBB.instr_rbegin();
991 while (MII != MBB.instr_rend() && MII->isDebugInstr())
992 ++MII;
993 if (MII == MBB.instr_rend())
994 continue;
995 MachineInstr &TI = *MII;
996 if (!TI.isTerminator() || !TI.isBranch())
997 // No terminator or non-branch terminator.
998 continue;
999
1000 Register TargetReg;
1001
1002 switch (TI.getOpcode()) {
1003 default:
1004 // Direct branch or conditional branch (leading to fallthrough).
1005 continue;
1006
1007 case X86::FARJMP16m:
1008 case X86::FARJMP32m:
1009 case X86::FARJMP64m:
1010 // We cannot mitigate far jumps or calls, but we also don't expect them
1011 // to be vulnerable to Spectre v1.2 or v2 (self trained) style attacks.
1012 continue;
1013
1014 case X86::JMP16m:
1015 case X86::JMP16m_NT:
1016 case X86::JMP32m:
1017 case X86::JMP32m_NT:
1018 case X86::JMP64m:
1019 case X86::JMP64m_NT:
1020 // Mostly as documentation.
1021 report_fatal_error("Memory operand jumps should have been unfolded!");
1022
1023 case X86::JMP16r:
1025 "Support for 16-bit indirect branches is not implemented.");
1026 case X86::JMP32r:
1028 "Support for 32-bit indirect branches is not implemented.");
1029
1030 case X86::JMP64r:
1031 TargetReg = TI.getOperand(0).getReg();
1032 }
1033
1034 // We have definitely found an indirect branch. Verify that there are no
1035 // preceding conditional branches as we don't yet support that.
1036 if (llvm::any_of(MBB.terminators(), [&](MachineInstr &OtherTI) {
1037 return !OtherTI.isDebugInstr() && &OtherTI != &TI;
1038 })) {
1039 LLVM_DEBUG({
1040 dbgs() << "ERROR: Found other terminators in a block with an indirect "
1041 "branch! This is not yet supported! Terminator sequence:\n";
1042 for (MachineInstr &MI : MBB.terminators()) {
1043 MI.dump();
1044 dbgs() << '\n';
1045 }
1046 });
1047 report_fatal_error("Unimplemented terminator sequence!");
1048 }
1049
1050 // Make the target register an available value for this block.
1051 TargetAddrSSA.AddAvailableValue(&MBB, TargetReg);
1052 IndirectTerminatedMBBs.insert(&MBB);
1053
1054 // Add all the successors to our target candidates.
1055 IndirectTargetMBBs.insert_range(MBB.successors());
1056 }
1057
1058 // Keep track of the cmov instructions we insert so we can return them.
1059 SmallVector<MachineInstr *, 16> CMovs;
1060
1061 // If we didn't find any indirect branches with targets, nothing to do here.
1062 if (IndirectTargetMBBs.empty())
1063 return CMovs;
1064
1065 // We found indirect branches and targets that need to be instrumented to
1066 // harden loads within them. Walk the blocks of the function (to get a stable
1067 // ordering) and instrument each target of an indirect branch.
1068 for (MachineBasicBlock &MBB : MF) {
1069 // Skip the blocks that aren't candidate targets.
1070 if (!IndirectTargetMBBs.count(&MBB))
1071 continue;
1072
1073 // We don't expect EH pads to ever be reached via an indirect branch. If
1074 // this is desired for some reason, we could simply skip them here rather
1075 // than asserting.
1076 assert(!MBB.isEHPad() &&
1077 "Unexpected EH pad as target of an indirect branch!");
1078
1079 // We should never end up threading EFLAGS into a block to harden
1080 // conditional jumps as there would be an additional successor via the
1081 // indirect branch. As a consequence, all such edges would be split before
1082 // reaching here, and the inserted block will handle the EFLAGS-based
1083 // hardening.
1084 assert(!MBB.isLiveIn(X86::EFLAGS) &&
1085 "Cannot check within a block that already has live-in EFLAGS!");
1086
1087 // We can't handle having non-indirect edges into this block unless this is
1088 // the only successor and we can synthesize the necessary target address.
1089 for (MachineBasicBlock *Pred : MBB.predecessors()) {
1090 // If we've already handled this by extracting the target directly,
1091 // nothing to do.
1092 if (IndirectTerminatedMBBs.count(Pred))
1093 continue;
1094
1095 // Otherwise, we have to be the only successor. We generally expect this
1096 // to be true as conditional branches should have had a critical edge
1097 // split already. We don't however need to worry about EH pad successors
1098 // as they'll happily ignore the target and their hardening strategy is
1099 // resilient to all ways in which they could be reached speculatively.
1100 if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
1101 return Succ->isEHPad() || Succ == &MBB;
1102 })) {
1103 LLVM_DEBUG({
1104 dbgs() << "ERROR: Found conditional entry to target of indirect "
1105 "branch!\n";
1106 Pred->dump();
1107 MBB.dump();
1108 });
1109 report_fatal_error("Cannot harden a conditional entry to a target of "
1110 "an indirect branch!");
1111 }
1112
1113 // Now we need to compute the address of this block and install it as a
1114 // synthetic target in the predecessor. We do this at the bottom of the
1115 // predecessor.
1116 auto InsertPt = Pred->getFirstTerminator();
1117 Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1118 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
1119 !Subtarget->isPositionIndependent()) {
1120 // Directly materialize it into an immediate.
1121 auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(),
1122 TII->get(X86::MOV64ri32), TargetReg)
1123 .addMBB(&MBB);
1124 ++NumInstsInserted;
1125 (void)AddrI;
1126 LLVM_DEBUG(dbgs() << " Inserting mov: "; AddrI->dump();
1127 dbgs() << "\n");
1128 } else {
1129 auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::LEA64r),
1130 TargetReg)
1131 .addReg(/*Base*/ X86::RIP)
1132 .addImm(/*Scale*/ 1)
1133 .addReg(/*Index*/ 0)
1134 .addMBB(&MBB)
1135 .addReg(/*Segment*/ 0);
1136 ++NumInstsInserted;
1137 (void)AddrI;
1138 LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump();
1139 dbgs() << "\n");
1140 }
1141 // And make this available.
1142 TargetAddrSSA.AddAvailableValue(Pred, TargetReg);
1143 }
1144
1145 // Materialize the needed SSA value of the target. Note that we need the
1146 // middle of the block as this block might at the bottom have an indirect
1147 // branch back to itself. We can do this here because at this point, every
1148 // predecessor of this block has an available value. This is basically just
1149 // automating the construction of a PHI node for this target.
1150 Register TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
1151
1152 // Insert a comparison of the incoming target register with this block's
1153 // address. This also requires us to mark the block as having its address
1154 // taken explicitly.
1155 MBB.setMachineBlockAddressTaken();
1156 auto InsertPt = MBB.SkipPHIsLabelsAndDebug(MBB.begin());
1157 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
1158 !Subtarget->isPositionIndependent()) {
1159 // Check directly against a relocated immediate when we can.
1160 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64ri32))
1161 .addReg(TargetReg, RegState::Kill)
1162 .addMBB(&MBB);
1163 ++NumInstsInserted;
1164 (void)CheckI;
1165 LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1166 } else {
1167 // Otherwise compute the address into a register first.
1168 Register AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1169 auto AddrI =
1170 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg)
1171 .addReg(/*Base*/ X86::RIP)
1172 .addImm(/*Scale*/ 1)
1173 .addReg(/*Index*/ 0)
1174 .addMBB(&MBB)
1175 .addReg(/*Segment*/ 0);
1176 ++NumInstsInserted;
1177 (void)AddrI;
1178 LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump(); dbgs() << "\n");
1179 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
1180 .addReg(TargetReg, RegState::Kill)
1181 .addReg(AddrReg, RegState::Kill);
1182 ++NumInstsInserted;
1183 (void)CheckI;
1184 LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1185 }
1186
1187 // Now cmov over the predicate if the comparison wasn't equal.
1188 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
1189 auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
1190 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
1191 auto CMovI =
1192 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
1193 .addReg(PS->InitialReg)
1194 .addReg(PS->PoisonReg)
1195 .addImm(X86::COND_NE);
1196 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)
1197 ->setIsKill(true);
1198 ++NumInstsInserted;
1199 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
1200 CMovs.push_back(&*CMovI);
1201
1202 // And put the new value into the available values for SSA form of our
1203 // predicate state.
1204 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
1205 }
1206
1207 // Return all the newly inserted cmov instructions of the predicate state.
1208 return CMovs;
1209}
1210
1211// Returns true if the MI has EFLAGS as a register def operand and it's live,
1212// otherwise it returns false
1213static bool isEFLAGSDefLive(const MachineInstr &MI) {
1214 if (const MachineOperand *DefOp =
1215 MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
1216 return !DefOp->isDead();
1217 }
1218 return false;
1219}
1220
1221static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1222 const TargetRegisterInfo &TRI) {
1223 // Check if EFLAGS are alive by seeing if there is a def of them or they
1224 // live-in, and then seeing if that def is in turn used.
1225 for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
1226 if (MachineOperand *DefOp =
1227 MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
1228 // If the def is dead, then EFLAGS is not live.
1229 if (DefOp->isDead())
1230 return false;
1231
1232 // Otherwise we've def'ed it, and it is live.
1233 return true;
1234 }
1235 // While at this instruction, also check if we use and kill EFLAGS
1236 // which means it isn't live.
1237 if (MI.killsRegister(X86::EFLAGS, &TRI))
1238 return false;
1239 }
1240
1241 // If we didn't find anything conclusive (neither definitely alive or
1242 // definitely dead) return whether it lives into the block.
1243 return MBB.isLiveIn(X86::EFLAGS);
1244}
1245
1246/// Trace the predicate state through each of the blocks in the function,
1247/// hardening everything necessary along the way.
1248///
1249/// We call this routine once the initial predicate state has been established
1250/// for each basic block in the function in the SSA updater. This routine traces
1251/// it through the instructions within each basic block, and for non-returning
1252/// blocks informs the SSA updater about the final state that lives out of the
1253/// block. Along the way, it hardens any vulnerable instruction using the
1254/// currently valid predicate state. We have to do these two things together
1255/// because the SSA updater only works across blocks. Within a block, we track
1256/// the current predicate state directly and update it as it changes.
1257///
1258/// This operates in two passes over each block. First, we analyze the loads in
1259/// the block to determine which strategy will be used to harden them: hardening
1260/// the address or hardening the loaded value when loaded into a register
1261/// amenable to hardening. We have to process these first because the two
1262/// strategies may interact -- later hardening may change what strategy we wish
1263/// to use. We also will analyze data dependencies between loads and avoid
1264/// hardening those loads that are data dependent on a load with a hardened
1265/// address. We also skip hardening loads already behind an LFENCE as that is
1266/// sufficient to harden them against misspeculation.
1267///
1268/// Second, we actively trace the predicate state through the block, applying
1269/// the hardening steps we determined necessary in the first pass as we go.
1270///
1271/// These two passes are applied to each basic block. We operate one block at a
1272/// time to simplify reasoning about reachability and sequencing.
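///
/// As a concrete (illustrative) example of the two strategies: for a load like
///   movq (%rsi,%rdx,8), %rdi
/// address hardening ORs the (all-ones-under-misspeculation) predicate state
/// into %rsi and %rdx before the load executes, while post-load hardening
/// instead ORs it into %rdi afterwards, flushing the loaded value to all-ones.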
1273void X86SpeculativeLoadHardeningImpl::tracePredStateThroughBlocksAndHarden(
1274 MachineFunction &MF) {
1275 SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
1276 SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
1277
1278 SmallSet<Register, 16> HardenedAddrRegs;
1279
1280 SmallDenseMap<Register, Register, 32> AddrRegToHardenedReg;
1281
1282 // Track the set of load-dependent registers through the basic block. Because
1283 // the values of these registers have an existing data dependency on a loaded
1284 // value which we would have checked, we can omit any checks on them.
1285 SparseBitVector<> LoadDepRegs;
1286
1287 for (MachineBasicBlock &MBB : MF) {
1288 // The first pass over the block: collect all the loads which can have their
1289 // loaded value hardened and all the loads that instead need their address
1290 // hardened. During this walk we propagate load dependence for address
1291 // hardened loads and also look for LFENCE to stop hardening wherever
1292 // possible. When deciding whether or not to harden the loaded value or not,
1293 // we check to see if any registers used in the address will have been
1294 // hardened at this point and if so, harden any remaining address registers
1295 // as that often successfully re-uses hardened addresses and minimizes
1296 // instructions.
1297 //
1298 // FIXME: We should consider an aggressive mode where we continue to keep as
1299 // many loads value hardened even when some address register hardening would
1300 // be free (due to reuse).
1301 //
1302 // Note that we only need this pass if we are actually hardening loads.
1303 if (HardenLoads)
1304 for (MachineInstr &MI : MBB) {
1305 // We naively assume that all def'ed registers of an instruction have
1306 // a data dependency on all of their operands.
1307 // FIXME: Do a more careful analysis of x86 to build a conservative
1308 // model here.
1309 if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
1310 return Op.isReg() && LoadDepRegs.test(Op.getReg().id());
1311 }))
1312 for (MachineOperand &Def : MI.defs())
1313 if (Def.isReg())
1314 LoadDepRegs.set(Def.getReg().id());
1315
1316 // Both Intel and AMD are guiding that they will change the semantics of
1317 // LFENCE to be a speculation barrier, so if we see an LFENCE, there is
1318 // no more need to guard things in this block.
1319 if (MI.getOpcode() == X86::LFENCE)
1320 break;
1321
1322 // If this instruction cannot load, nothing to do.
1323 if (!MI.mayLoad())
1324 continue;
1325
1326 // Some instructions which "load" are trivially safe or unimportant.
1327 if (MI.getOpcode() == X86::MFENCE)
1328 continue;
1329
1330 // Extract the memory operand information about this instruction.
1331 const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(MI);
1332 if (MemRefBeginIdx < 0) {
1334 << "WARNING: unable to harden loading instruction: ";
1335 MI.dump());
1336 continue;
1337 }
1338
1339 MachineOperand &BaseMO =
1340 MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1341 MachineOperand &IndexMO =
1342 MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1343
1344 // If we have at least one (non-frame-index, non-RIP) register operand,
1345 // and neither operand is load-dependent, we need to check the load.
1346 Register BaseReg, IndexReg;
1347 if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
1348 BaseMO.getReg().isValid())
1349 BaseReg = BaseMO.getReg();
1350 if (IndexMO.getReg().isValid())
1351 IndexReg = IndexMO.getReg();
1352
1353 if (!BaseReg && !IndexReg)
1354 // No register operands!
1355 continue;
1356
1357 // If any register operand is dependent, this load is dependent and we
1358 // needn't check it.
1359 // FIXME: Is this true in the case where we are hardening loads after
1360 // they complete? Unclear, need to investigate.
1361 if ((BaseReg && LoadDepRegs.test(BaseReg.id())) ||
1362 (IndexReg && LoadDepRegs.test(IndexReg.id())))
1363 continue;
1364
1365 // If post-load hardening is enabled, this load is compatible with
1366 // post-load hardening, and we aren't already going to harden one of the
1367 // address registers, queue it up to be hardened post-load. Notably,
1368 // even once hardened this won't introduce a useful dependency that
1369 // could prune out subsequent loads.
1370 if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
1371 !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
1372 MI.getOperand(0).isReg() &&
1373 canHardenRegister(MI.getOperand(0).getReg()) &&
1374 !HardenedAddrRegs.count(BaseReg) &&
1375 !HardenedAddrRegs.count(IndexReg)) {
1376 HardenPostLoad.insert(&MI);
1377 HardenedAddrRegs.insert(MI.getOperand(0).getReg());
1378 continue;
1379 }
1380
1381 // Record this instruction for address hardening and record its register
1382 // operands as being address-hardened.
1383 HardenLoadAddr.insert(&MI);
1384 if (BaseReg)
1385 HardenedAddrRegs.insert(BaseReg);
1386 if (IndexReg)
1387 HardenedAddrRegs.insert(IndexReg);
1388
1389 for (MachineOperand &Def : MI.defs())
1390 if (Def.isReg())
1391 LoadDepRegs.set(Def.getReg().id());
1392 }
1393
1394 // Now re-walk the instructions in the basic block, and apply whichever
1395 // hardening strategy we have elected. Note that we do this in a second
1396 // pass specifically so that we have the complete set of instructions for
1397 // which we will do post-load hardening and can defer it in certain
1398 // circumstances.
1399 for (MachineInstr &MI : MBB) {
1400 if (HardenLoads) {
1401 // We cannot both require hardening the def of a load and its address.
1402 assert(!(HardenLoadAddr.count(&MI) && HardenPostLoad.count(&MI)) &&
1403 "Requested to harden both the address and def of a load!");
1404
1405 // Check if this is a load whose address needs to be hardened.
1406 if (HardenLoadAddr.erase(&MI)) {
1407 const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(MI);
1408 assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");
1409
1410 MachineOperand &BaseMO =
1411 MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1412 MachineOperand &IndexMO =
1413 MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1414 hardenLoadAddr(MI, BaseMO, IndexMO, AddrRegToHardenedReg);
1415 continue;
1416 }
1417
1418 // Test if this instruction is one of our post load instructions (and
1419 // remove it from the set if so).
1420 if (HardenPostLoad.erase(&MI)) {
1421 assert(!MI.isCall() && "Must not try to post-load harden a call!");
1422
1423 // If this is a data-invariant load and there is no EFLAGS
1424 // interference, we want to try and sink any hardening as far as
1425 // possible.
1426 if (X86InstrInfo::isDataInvariantLoad(MI) && !isEFLAGSDefLive(MI)) {
1427 // Sink the instruction we'll need to harden as far as we can down
1428 // the graph.
1429 MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);
1430
1431 // If we managed to sink this instruction, update everything so we
1432 // harden that instruction when we reach it in the instruction
1433 // sequence.
1434 if (SunkMI != &MI) {
1435 // If in sinking there was no instruction needing to be hardened,
1436 // we're done.
1437 if (!SunkMI)
1438 continue;
1439
1440 // Otherwise, add this to the set of defs we harden.
1441 HardenPostLoad.insert(SunkMI);
1442 continue;
1443 }
1444 }
1445
1446 Register HardenedReg = hardenPostLoad(MI);
1447
1448 // Mark the resulting hardened register as such so we don't re-harden.
1449 AddrRegToHardenedReg[HardenedReg] = HardenedReg;
1450
1451 continue;
1452 }
1453
1454 // Check for an indirect call or branch that may need its input hardened
1455 // even if we couldn't find the specific load used, or were able to
1456 // avoid hardening it for some reason. Note that here we cannot break
1457 // out afterward as we may still need to handle any call aspect of this
1458 // instruction.
1459 if ((MI.isCall() || MI.isBranch()) && HardenIndirectCallsAndJumps)
1460 hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
1461 }
1462
1463 // After we finish hardening loads we handle interprocedural hardening if
1464 // enabled and relevant for this instruction.
1465 if (!HardenInterprocedurally)
1466 continue;
1467 if (!MI.isCall() && !MI.isReturn())
1468 continue;
1469
1470 // If this is a direct return (IE, not a tail call) just directly harden
1471 // it.
1472 if (MI.isReturn() && !MI.isCall()) {
1473 hardenReturnInstr(MI);
1474 continue;
1475 }
1476
1477 // Otherwise we have a call. We need to handle transferring the predicate
1478 // state into a call and recovering it after the call returns (unless this
1479 // is a tail call).
1480 assert(MI.isCall() && "Should only reach here for calls!");
1481 tracePredStateThroughCall(MI);
1482 }
1483
1484 HardenPostLoad.clear();
1485 HardenLoadAddr.clear();
1486 HardenedAddrRegs.clear();
1487 AddrRegToHardenedReg.clear();
1488
1489 // Currently, we only track data-dependent loads within a basic block.
1490 // FIXME: We should see if this is necessary or if we could be more
1491 // aggressive here without opening up attack avenues.
1492 LoadDepRegs.clear();
1493 }
1494}
1495
1496/// Save EFLAGS into the returned GPR. This can in turn be restored with
1497/// `restoreEFLAGS`.
1498///
1499/// Note that LLVM can only lower very simple patterns of saved and restored
1500/// EFLAGS registers. The restore should always be within the same basic block
1501/// as the save so that no PHI nodes are inserted.
1502Register X86SpeculativeLoadHardeningImpl::saveEFLAGS(
1503 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1504 const DebugLoc &Loc) {
1505 // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
1506 // what instruction selection does.
1507 Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
1508 // We directly copy the FLAGS register and rely on later lowering to clean
1509 // this up into the appropriate setCC instructions.
1510 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
1511 ++NumInstsInserted;
1512 return Reg;
1513}
1514
1515/// Restore EFLAGS from the provided GPR. This should be produced by
1516/// `saveEFLAGS`.
1517///
1518/// This must be done within the same basic block as the save in order to
1519/// reliably lower.
1520void X86SpeculativeLoadHardeningImpl::restoreEFLAGS(
1521 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1522 const DebugLoc &Loc, Register Reg) {
1523 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
1524 ++NumInstsInserted;
1525}
1526
1527/// Takes the current predicate state (in a register) and merges it into the
1528/// stack pointer. The state is essentially a single bit, but we merge this in
1529/// a way that won't form non-canonical pointers and also will be preserved
1530/// across normal stack adjustments.
1531void X86SpeculativeLoadHardeningImpl::mergePredStateIntoSP(
1532 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1533 const DebugLoc &Loc, Register PredStateReg) {
1534 Register TmpReg = MRI->createVirtualRegister(PS->RC);
1535 // FIXME: This hard codes a shift distance based on the number of bits needed
1536 // to stay canonical on 64-bit. We should compute this somehow and support
1537 // 32-bit as part of that.
1538 auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
1539 .addReg(PredStateReg, RegState::Kill)
1540 .addImm(47);
1541 ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1542 ++NumInstsInserted;
1543 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
1544 .addReg(X86::RSP)
1545 .addReg(TmpReg, RegState::Kill);
1546 OrI->addRegisterDead(X86::EFLAGS, TRI);
1547 ++NumInstsInserted;
1548}
1549
1550/// Extracts the predicate state stored in the high bits of the stack pointer.
1551Register X86SpeculativeLoadHardeningImpl::extractPredStateFromSP(
1552 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1553 const DebugLoc &Loc) {
1554 Register PredStateReg = MRI->createVirtualRegister(PS->RC);
1555 Register TmpReg = MRI->createVirtualRegister(PS->RC);
1556
1557 // We know that the stack pointer will have any preserved predicate state in
1558 // its high bit. We just want to smear this across the other bits. Turns out,
1559 // this is exactly what an arithmetic right shift does.
1560 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
1561 .addReg(X86::RSP);
1562 auto ShiftI =
1563 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
1564 .addReg(TmpReg, RegState::Kill)
1565 .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
1566 ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1567 ++NumInstsInserted;
1568
1569 return PredStateReg;
1570}
1571
1572void X86SpeculativeLoadHardeningImpl::hardenLoadAddr(
1573 MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
1574 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
1575 MachineBasicBlock &MBB = *MI.getParent();
1576 const DebugLoc &Loc = MI.getDebugLoc();
1577
1578 // Check if EFLAGS are alive by seeing if there is a def of them or they
1579 // live-in, and then seeing if that def is in turn used.
1580 bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);
1581
1582 SmallVector<MachineOperand *, 2> HardenOpRegs;
1583
1584 if (BaseMO.isFI()) {
1585 // A frame index is never a dynamically controllable load, so only
1586 // harden it if we're covering fixed address loads as well.
1587 LLVM_DEBUG(
1588 dbgs() << " Skipping hardening base of explicit stack frame load: ";
1589 MI.dump(); dbgs() << "\n");
1590 } else if (BaseMO.getReg() == X86::RSP) {
1591 // Some idempotent atomic operations are lowered directly to a locked
1592 // OR with 0 to the top of the stack (or slightly offset from top) which uses an
1593 // explicit RSP register as the base.
1594 assert(IndexMO.getReg() == X86::NoRegister &&
1595 "Explicit RSP access with dynamic index!");
1596 LLVM_DEBUG(
1597 dbgs() << " Cannot harden base of explicit RSP offset in a load!");
1598 } else if (BaseMO.getReg() == X86::RIP ||
1599 BaseMO.getReg() == X86::NoRegister) {
1600 // For both RIP-relative addressed loads or absolute loads, we cannot
1601 // meaningfully harden them because the address being loaded has no
1602 // dynamic component.
1603 //
1604 // FIXME: When using a segment base (like TLS does) we end up with the
1605 // dynamic address being the base plus -1 because we can't mutate the
1606 // segment register here. This allows the signed 32-bit offset to point at
1607 // valid segment-relative addresses and load them successfully.
1608 LLVM_DEBUG(
1609 dbgs() << " Cannot harden base of "
1610 << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
1611 << " address in a load!");
1612 } else {
1613 assert(BaseMO.isReg() &&
1614 "Only allowed to have a frame index or register base.");
1615 HardenOpRegs.push_back(&BaseMO);
1616 }
1617
1618 if (IndexMO.getReg() != X86::NoRegister &&
1619 (HardenOpRegs.empty() ||
1620 HardenOpRegs.front()->getReg() != IndexMO.getReg()))
1621 HardenOpRegs.push_back(&IndexMO);
1622
1623 assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
1624 "Should have exactly one or two registers to harden!");
1625 assert((HardenOpRegs.size() == 1 ||
1626 HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
1627 "Should not have two of the same registers!");
1628
1629 // Remove any registers that have already been checked.
1630 llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
1631 // See if this operand's register has already been checked.
1632 auto It = AddrRegToHardenedReg.find(Op->getReg());
1633 if (It == AddrRegToHardenedReg.end())
1634 // Not checked, so retain this one.
1635 return false;
1636
1637 // Otherwise, we can directly update this operand and remove it.
1638 Op->setReg(It->second);
1639 return true;
1640 });
1641 // If there are none left, we're done.
1642 if (HardenOpRegs.empty())
1643 return;
1644
1645 // Compute the current predicate state.
1646 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
1647
1648 auto InsertPt = MI.getIterator();
1649
1650 // If EFLAGS are live and we don't have access to instructions that avoid
1651 // clobbering EFLAGS we need to save and restore them. This in turn makes
1652 // the EFLAGS no longer live.
1653 Register FlagsReg;
1654 if (EFLAGSLive && !Subtarget->hasBMI2()) {
1655 EFLAGSLive = false;
1656 FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
1657 }
1658
1659 for (MachineOperand *Op : HardenOpRegs) {
1660 Register OpReg = Op->getReg();
1661 auto *OpRC = MRI->getRegClass(OpReg);
1662 Register TmpReg = MRI->createVirtualRegister(OpRC);
1663
1664 // If this is a vector register, we'll need somewhat custom logic to handle
1665 // hardening it.
1666 if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
1667 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
1668 assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
1669 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
1670
1671 // Move our state into a vector register.
1672 // FIXME: We could skip this at the cost of longer encodings with AVX-512
1673 // but that doesn't seem likely worth it.
1674 Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
1675 auto MovI =
1676 BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
1677 .addReg(StateReg);
1678 (void)MovI;
1679 ++NumInstsInserted;
1680 LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");
1681
1682 // Broadcast it across the vector register.
1683 Register VBStateReg = MRI->createVirtualRegister(OpRC);
1684 auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
1685 TII->get(Is128Bit ? X86::VPBROADCASTQrr
1686 : X86::VPBROADCASTQYrr),
1687 VBStateReg)
1688 .addReg(VStateReg);
1689 (void)BroadcastI;
1690 ++NumInstsInserted;
1691 LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
1692 dbgs() << "\n");
1693
1694 // Merge our potential poison state into the value with a vector or.
1695 auto OrI =
1696 BuildMI(MBB, InsertPt, Loc,
1697 TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
1698 .addReg(VBStateReg)
1699 .addReg(OpReg);
1700 (void)OrI;
1701 ++NumInstsInserted;
1702 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1703 } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
1704 OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
1705 OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
1706 assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
1707 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
1708 bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
1709 if (Is128Bit || Is256Bit)
1710 assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
1711
1712 // Broadcast our state into a vector register.
1713 Register VStateReg = MRI->createVirtualRegister(OpRC);
1714 unsigned BroadcastOp = Is128Bit ? X86::VPBROADCASTQrZ128rr
1715 : Is256Bit ? X86::VPBROADCASTQrZ256rr
1716 : X86::VPBROADCASTQrZrr;
1717 auto BroadcastI =
1718 BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
1719 .addReg(StateReg);
1720 (void)BroadcastI;
1721 ++NumInstsInserted;
1722 LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
1723 dbgs() << "\n");
1724
1725 // Merge our potential poison state into the value with a vector or.
1726 unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
1727 : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
1728 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
1729 .addReg(VStateReg)
1730 .addReg(OpReg);
1731 (void)OrI;
1732 ++NumInstsInserted;
1733 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1734 } else {
1735 // FIXME: Need to support GR32 here for 32-bit code.
1736 assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
1737 "Not a supported register class for address hardening!");
1738
1739 if (!EFLAGSLive) {
1740 // Merge our potential poison state into the value with an or.
1741 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
1742 .addReg(StateReg)
1743 .addReg(OpReg);
1744 OrI->addRegisterDead(X86::EFLAGS, TRI);
1745 ++NumInstsInserted;
1746 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1747 } else {
1748 // We need to avoid touching EFLAGS so shift out all but the least
1749 // significant bit using the instruction that doesn't update flags.
1750 auto ShiftI =
1751 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
1752 .addReg(OpReg)
1753 .addReg(StateReg);
1754 (void)ShiftI;
1755 ++NumInstsInserted;
1756 LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
1757 dbgs() << "\n");
1758 }
1759 }
1760
1761 // Record this register as checked and update the operand.
1762 assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
1763 "Should not have checked this register yet!");
1764 AddrRegToHardenedReg[Op->getReg()] = TmpReg;
1765 Op->setReg(TmpReg);
1766 ++NumAddrRegsHardened;
1767 }
1768
1769 // And restore the flags if needed.
1770 if (FlagsReg)
1771 restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
1772}
1773
1774MachineInstr *X86SpeculativeLoadHardeningImpl::sinkPostLoadHardenedInst(
1775 MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
1777 "Cannot get here with a non-invariant load!");
1778 assert(!isEFLAGSDefLive(InitialMI) &&
1779 "Cannot get here with a data invariant load "
1780 "that interferes with EFLAGS!");
1781
1782 // See if we can sink hardening the loaded value.
1783 auto SinkCheckToSingleUse =
1784 [&](MachineInstr &MI) -> std::optional<MachineInstr *> {
1785 Register DefReg = MI.getOperand(0).getReg();
1786
1787 // We need to find a single use to which we can sink the check. We can
1788 // primarily do this because many uses may already end up checked on their
1789 // own.
1790 MachineInstr *SingleUseMI = nullptr;
1791 for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
1792 // If we're already going to harden this use, it is data invariant, it
1793 // does not interfere with EFLAGS, and within our block.
1794 if (HardenedInstrs.count(&UseMI)) {
1795 if (!X86InstrInfo::isDataInvariantLoad(UseMI) || isEFLAGSDefLive(UseMI)) {
1796 // If we've already decided to harden a non-load, we must have sunk
1797 // some other post-load hardened instruction to it and it must itself
1798 // be data-invariant.
1800 "Data variant instruction being hardened!");
1801 continue;
1802 }
1803
1804 // Otherwise, this is a load and the load component can't be data
1805 // invariant so check how this register is being used.
1806 const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(UseMI);
1807 assert(MemRefBeginIdx >= 0 &&
1808 "Should always have mem references here!");
1809
1810 MachineOperand &BaseMO =
1811 UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1812 MachineOperand &IndexMO =
1813 UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1814 if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
1815 (IndexMO.isReg() && IndexMO.getReg() == DefReg))
1816 // The load uses the register as part of its address making it not
1817 // invariant.
1818 return {};
1819
1820 continue;
1821 }
1822
1823 if (SingleUseMI)
1824 // We already have a single use, this would make two. Bail.
1825 return {};
1826
1827 // If this single use isn't data invariant, isn't in this block, or has
1828 // interfering EFLAGS, we can't sink the hardening to it.
1829 if (!X86InstrInfo::isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent() ||
1830 isEFLAGSDefLive(UseMI))
1831 return {};
1832
1833 // If this instruction defines multiple registers bail as we won't harden
1834 // all of them.
1835 if (UseMI.getDesc().getNumDefs() > 1)
1836 return {};
1837
1838 // If this register isn't a virtual register we can't walk its uses sanely;
1839 // just bail. Also check that its register class is one of the ones we
1840 // can harden.
1841 Register UseDefReg = UseMI.getOperand(0).getReg();
1842 if (!canHardenRegister(UseDefReg))
1843 return {};
1844
1845 SingleUseMI = &UseMI;
1846 }
1847
1848 // If SingleUseMI is still null, there is no use that needs its own
1849 // checking. Otherwise, it is the single use that needs checking.
1850 return {SingleUseMI};
1851 };
1852
1853 MachineInstr *MI = &InitialMI;
1854 while (std::optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
1855 // Update which MI we're checking now.
1856 MI = *SingleUse;
1857 if (!MI)
1858 break;
1859 }
1860
1861 return MI;
1862}
1863
1864bool X86SpeculativeLoadHardeningImpl::canHardenRegister(Register Reg) {
1865 // We only support hardening virtual registers.
1866 if (!Reg.isVirtual())
1867 return false;
1868
1869 auto *RC = MRI->getRegClass(Reg);
1870 int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
1871 if (RegBytes > 8)
1872 // We don't support post-load hardening of vectors.
1873 return false;
1874
1875 unsigned RegIdx = Log2_32(RegBytes);
1876 assert(RegIdx < 4 && "Unsupported register size");
1877
1878 // If this register class is explicitly constrained to a class that doesn't
1879 // require REX prefix, we may not be able to satisfy that constraint when
1880 // emitting the hardening instructions, so bail out here.
1881 // FIXME: This seems like a pretty lame hack. The way this comes up is when we
1882 // end up both with a NOREX and REX-only register as operands to the hardening
1883 // instructions. It would be better to fix that code to handle this situation
1884 // rather than hack around it in this way.
1885 const TargetRegisterClass *NOREXRegClasses[] = {
1886 &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
1887 &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
1888 if (RC == NOREXRegClasses[RegIdx])
1889 return false;
1890
1891 const TargetRegisterClass *GPRRegClasses[] = {
1892 &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
1893 &X86::GR64RegClass};
1894 return RC->hasSuperClassEq(GPRRegClasses[RegIdx]);
1895}
1896
1897/// Harden a value in a register.
1898///
1899/// This is the low-level logic to fully harden a value sitting in a register
1900/// against leaking during speculative execution.
1901///
1902/// Unlike hardening an address that is used by a load, this routine is required
1903/// to hide *all* incoming bits in the register.
1904///
1905/// `Reg` must be a virtual register. Currently, it is required to be a GPR no
1906/// larger than the predicate state register. FIXME: We should support vector
1907/// registers here by broadcasting the predicate state.
1908///
1909/// The new, hardened virtual register is returned. It will have the same
1910/// register class as `Reg`.
1911Register X86SpeculativeLoadHardeningImpl::hardenValueInRegister(
1912 Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1913 const DebugLoc &Loc) {
1914 assert(canHardenRegister(Reg) && "Cannot harden this register!");
1915
1916 auto *RC = MRI->getRegClass(Reg);
1917 int Bytes = TRI->getRegSizeInBits(*RC) / 8;
1918 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
1919 assert((Bytes == 1 || Bytes == 2 || Bytes == 4 || Bytes == 8) &&
1920 "Unknown register size");
1921
1922 // FIXME: Need to teach this about 32-bit mode.
1923 if (Bytes != 8) {
1924 unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
1925 unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
1926 Register NarrowStateReg = MRI->createVirtualRegister(RC);
1927 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
1928 .addReg(StateReg, {}, SubRegImm);
1929 StateReg = NarrowStateReg;
1930 }
1931
1932 Register FlagsReg;
1933 if (isEFLAGSLive(MBB, InsertPt, *TRI))
1934 FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
1935
1936 Register NewReg = MRI->createVirtualRegister(RC);
1937 unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
1938 unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
1939 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
1940 .addReg(StateReg)
1941 .addReg(Reg);
1942 OrI->addRegisterDead(X86::EFLAGS, TRI);
1943 ++NumInstsInserted;
1944 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1945
1946 if (FlagsReg)
1947 restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
1948
1949 return NewReg;
1950}
1951
1952/// Harden a load by hardening the loaded value in the defined register.
1953///
1954/// We can harden a non-leaking load into a register without touching the
1955/// address by just hiding all of the loaded bits during misspeculation. We use
1956/// an `or` instruction to do this because we set up our poison value as all
1957 /// ones. The goal is just for the actual loaded bits not to be exposed to
1958 /// further execution, and coercing them all to one is sufficient for that.
1959///
1960/// Returns the newly hardened register.
1961Register X86SpeculativeLoadHardeningImpl::hardenPostLoad(MachineInstr &MI) {
1962 MachineBasicBlock &MBB = *MI.getParent();
1963 const DebugLoc &Loc = MI.getDebugLoc();
1964
1965 auto &DefOp = MI.getOperand(0);
1966 Register OldDefReg = DefOp.getReg();
1967 auto *DefRC = MRI->getRegClass(OldDefReg);
1968
1969 // Because we want to completely replace the uses of this def'ed value with
1970 // the hardened value, create a dedicated new register that will only be used
1971 // to communicate the unhardened value to the hardening.
1972 Register UnhardenedReg = MRI->createVirtualRegister(DefRC);
1973 DefOp.setReg(UnhardenedReg);
1974
1975 // Now harden this register's value, getting a hardened reg that is safe to
1976 // use. Note that we insert the instructions to compute this *after* the
1977 // defining instruction, not before it.
1978 Register HardenedReg = hardenValueInRegister(
1979 UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);
1980
1981 // Finally, replace the old register (which now only has the uses of the
1982 // original def) with the hardened register.
1983 MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);
1984
1985 ++NumPostLoadRegsHardened;
1986 return HardenedReg;
1987}
1988
1989/// Harden a return instruction.
1990///
1991/// Returns implicitly perform a load which we need to harden. Without hardening
1992 /// this load, an attacker may speculatively write over the return address to
1993/// steer speculation of the return to an attacker controlled address. This is
1994/// called Spectre v1.1 or Bounds Check Bypass Store (BCBS) and is described in
1995/// this paper:
1996/// https://people.csail.mit.edu/vlk/spectre11.pdf
1997///
1998/// We can harden this by introducing an LFENCE that will delay any load of the
1999/// return address until prior instructions have retired (and thus are not being
2000/// speculated), or we can harden the address used by the implicit load: the
2001/// stack pointer.
2002///
2003/// If we are not using an LFENCE, hardening the stack pointer has an additional
2004/// benefit: it allows us to pass the predicate state accumulated in this
2005/// function back to the caller. In the absence of a BCBS attack on the return,
2006/// the caller will typically be resumed and speculatively executed due to the
2007/// Return Stack Buffer (RSB) prediction which is very accurate and has a high
2008/// priority. It is possible that some code from the caller will be executed
2009/// speculatively even during a BCBS-attacked return until the steering takes
2010/// effect. Whenever this happens, the caller can recover the (poisoned)
2011/// predicate state from the stack pointer and continue to harden loads.
2012void X86SpeculativeLoadHardeningImpl::hardenReturnInstr(MachineInstr &MI) {
2013 MachineBasicBlock &MBB = *MI.getParent();
2014 const DebugLoc &Loc = MI.getDebugLoc();
2015 auto InsertPt = MI.getIterator();
2016
2017 if (FenceCallAndRet)
2018 // No need to fence here as we'll fence at the return site itself. That
2019 // handles more cases than we can handle here.
2020 return;
2021
2022 // Take our predicate state, shift it to the high 17 bits (so that we keep
2023 // pointers canonical) and merge it into RSP. This will allow the caller to
2024 // extract it when we return (speculatively).
2025 mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
2026}
2027
2028/// Trace the predicate state through a call.
2029///
2030/// There are several layers of this needed to handle the full complexity of
2031/// calls.
2032///
2033/// First, we need to send the predicate state into the called function. We do
2034/// this by merging it into the high bits of the stack pointer.
2035///
2036/// For tail calls, this is all we need to do.
2037///
2038/// For calls where we might return and resume the control flow, we need to
2039/// extract the predicate state from the high bits of the stack pointer after
2040/// control returns from the called function.
2041///
2042/// We also need to verify that we intended to return to this location in the
2043/// code. An attacker might arrange for the processor to mispredict the return
2044/// to this valid but incorrect return address in the program rather than the
2045/// correct one. See the paper on this attack, called "ret2spec" by the
2046/// researchers, here:
2047/// https://christian-rossow.de/publications/ret2spec-ccs2018.pdf
2048///
2049/// The way we verify that we returned to the correct location is by preserving
2050/// the expected return address across the call. One technique involves taking
2051/// advantage of the red-zone to load the return address from `8(%rsp)` where it
2052/// was left by the RET instruction when it popped `%rsp`. Alternatively, we can
2053/// directly save the address into a register that will be preserved across the
2054/// call. We compare this intended return address against the address
2055/// immediately following the call (the observed return address). If these
2056/// mismatch, we have detected misspeculation and can poison our predicate
2057/// state.
2058void X86SpeculativeLoadHardeningImpl::tracePredStateThroughCall(
2059 MachineInstr &MI) {
2060 MachineBasicBlock &MBB = *MI.getParent();
2061 MachineFunction &MF = *MBB.getParent();
2062 auto InsertPt = MI.getIterator();
2063 const DebugLoc &Loc = MI.getDebugLoc();
2064
2065 if (FenceCallAndRet) {
2066 if (MI.isReturn())
2067 // Tail call, we don't return to this function.
2068 // FIXME: We should also handle noreturn calls.
2069 return;
2070
2071 // We don't need to fence before the call because the function should fence
2072 // in its entry. However, we do need to fence after the call returns.
2073 // Fencing before the return doesn't correctly handle cases where the return
2074 // itself is mispredicted.
2075 BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
2076 ++NumInstsInserted;
2077 ++NumLFENCEsInserted;
2078 return;
2079 }
2080
2081 // First, we transfer the predicate state into the called function by merging
2082 // it into the stack pointer. This will kill the current def of the state.
2083 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2084 mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
2085
2086 // If this call is also a return, it is a tail call and we don't need anything
2087 // else to handle it so just return. Also, if there are no further
2088 // instructions and no successors, this call does not return so we can also
2089 // bail.
2090 if (MI.isReturn() || (std::next(InsertPt) == MBB.end() && MBB.succ_empty()))
2091 return;
2092
2093 // Create a symbol to track the return address and attach it to the call
2094 // machine instruction. We will lower extra symbols attached to call
2095 // instructions as a label immediately following the call.
2096 MCSymbol *RetSymbol =
2097 MF.getContext().createTempSymbol("slh_ret_addr",
2098 /*AlwaysAddSuffix*/ true);
2099 MI.setPostInstrSymbol(MF, RetSymbol);
2100
2101 const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
2102 Register ExpectedRetAddrReg;
2103
2104 // If we have no red zones or if the function returns twice (possibly without
2105 // using the `ret` instruction) like setjmp, we need to save the expected
2106 // return address prior to the call.
2107 if (!Subtarget->getFrameLowering()->has128ByteRedZone(MF) ||
2108 MF.exposesReturnsTwice()) {
2109 // If we don't have red zones, we need to compute the expected return
2110 // address prior to the call and store it in a register that lives across
2111 // the call.
2112 //
2113 // In some ways, this is doubly satisfying as a mitigation because it will
2114 // also successfully detect stack smashing bugs in some cases (typically,
2115 // when a callee-saved register is used and the callee doesn't push it onto
2116 // the stack). But that isn't our primary goal, so we only use it as
2117 // a fallback.
2118 //
2119 // FIXME: It isn't clear that this is reliable in the face of
2120 // rematerialization in the register allocator. We somehow need to force
2121 // that to not occur for this particular instruction, and instead to spill
2122 // or otherwise preserve the value computed *prior* to the call.
2123 //
2124 // FIXME: It is even less clear why MachineCSE can't just fold this when we
2125 // end up having to use identical instructions both before and after the
2126 // call to feed the comparison.
2127 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2128 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
2129 !Subtarget->isPositionIndependent()) {
2130 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
2131 .addSym(RetSymbol);
2132 } else {
2133 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)
2134 .addReg(/*Base*/ X86::RIP)
2135 .addImm(/*Scale*/ 1)
2136 .addReg(/*Index*/ 0)
2137 .addSym(RetSymbol)
2138 .addReg(/*Segment*/ 0);
2139 }
2140 }
2141
2142 // Step past the call to handle when it returns.
2143 ++InsertPt;
2144
2145 // If we didn't pre-compute the expected return address into a register, then
2146 // red zones are enabled and the return address is still available on the
2147 // stack immediately after the call. As the very first instruction, we load it
2148 // into a register.
2149 if (!ExpectedRetAddrReg) {
2150 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2151 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)
2152 .addReg(/*Base*/ X86::RSP)
2153 .addImm(/*Scale*/ 1)
2154 .addReg(/*Index*/ 0)
2155 .addImm(/*Displacement*/ -8) // The stack pointer has been popped, so
2156 // the return address is 8-bytes past it.
2157 .addReg(/*Segment*/ 0);
2158 }
2159
2160 // Now we extract the callee's predicate state from the stack pointer.
2161 Register NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
2162
2163 // Test the expected return address against our actual address. If we can
2164 // form this basic block's address as an immediate, this is easy. Otherwise
2165 // we compute it.
2166 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
2167 !Subtarget->isPositionIndependent()) {
2168 // FIXME: Could we fold this with the load? It would require careful EFLAGS
2169 // management.
2170 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))
2171 .addReg(ExpectedRetAddrReg, RegState::Kill)
2172 .addSym(RetSymbol);
2173 } else {
2174 Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
2175 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
2176 .addReg(/*Base*/ X86::RIP)
2177 .addImm(/*Scale*/ 1)
2178 .addReg(/*Index*/ 0)
2179 .addSym(RetSymbol)
2180 .addReg(/*Segment*/ 0);
2181 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))
2182 .addReg(ExpectedRetAddrReg, RegState::Kill)
2183 .addReg(ActualRetAddrReg, RegState::Kill);
2184 }
2185
2186 // Now conditionally update the predicate state we just extracted if we ended
2187 // up at a different return address than expected.
2188 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
2189 auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
2190
2191 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
2192 auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
2193 .addReg(NewStateReg, RegState::Kill)
2194 .addReg(PS->PoisonReg)
2195 .addImm(X86::COND_NE);
2196 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
2197 ++NumInstsInserted;
2198 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
2199
2200 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
2201}
2202
2203/// An attacker may speculatively store over a value that is then speculatively
2204/// loaded and used as the target of an indirect call or jump instruction. This
2205/// is called Spectre v1.2 or Bounds Check Bypass Store (BCBS) and is described
2206/// in this paper:
2207/// https://people.csail.mit.edu/vlk/spectre11.pdf
2208///
2209/// When this happens, the speculative execution of the call or jump will end up
2210/// being steered to this attacker controlled address. While most such loads
2211/// will be adequately hardened already, we want to ensure that they are
2212/// definitively treated as needing post-load hardening. While address hardening
2213/// is sufficient to prevent secret data from leaking to the attacker, it may
2214/// not be sufficient to prevent an attacker from steering speculative
2215/// execution. We forcibly unfolded all relevant loads above and so will always
2216 /// have an opportunity to post-load harden here; we just need to scan for cases
2217/// not already flagged and add them.
2218void X86SpeculativeLoadHardeningImpl::hardenIndirectCallOrJumpInstr(
2219 MachineInstr &MI,
2220 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
2221 switch (MI.getOpcode()) {
2222 case X86::FARCALL16m:
2223 case X86::FARCALL32m:
2224 case X86::FARCALL64m:
2225 case X86::FARJMP16m:
2226 case X86::FARJMP32m:
2227 case X86::FARJMP64m:
2228 // We don't need to harden either far calls or far jumps as they are
2229 // safe from Spectre.
2230 return;
2231
2232 default:
2233 break;
2234 }
2235
2236 // We should never see a loading instruction at this point, as those should
2237 // have been unfolded.
2238 assert(!MI.mayLoad() && "Found a lingering loading instruction!");
2239
2240 // If the first operand isn't a register, this is a branch or call
2241 // instruction with an immediate operand which doesn't need to be hardened.
2242 if (!MI.getOperand(0).isReg())
2243 return;
2244
2245 // For all of these, the target register is the first operand of the
2246 // instruction.
2247 auto &TargetOp = MI.getOperand(0);
2248 Register OldTargetReg = TargetOp.getReg();
2249
2250 // Try to lookup a hardened version of this register. We retain a reference
2251 // here as we want to update the map to track any newly computed hardened
2252 // register.
2253 Register &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
2254
2255 // If we don't have a hardened register yet, compute one. Otherwise, just use
2256 // the already hardened register.
2257 //
2258 // FIXME: It is a little suspect that we use partially hardened registers that
2259 // only feed addresses. The complexity of partial hardening with SHRX
2260 // continues to pile up. Should definitively measure its value and consider
2261 // eliminating it.
2262 if (!HardenedTargetReg)
2263 HardenedTargetReg = hardenValueInRegister(
2264 OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());
2265
2266 // Set the target operand to the hardened register.
2267 TargetOp.setReg(HardenedTargetReg);
2268
2269 ++NumCallsOrJumpsHardened;
2270}
2271
2272PreservedAnalyses
2273 X86SpeculativeLoadHardeningPass::run(MachineFunction &MF,
2274 MachineFunctionAnalysisManager &MFAM) {
2275 X86SpeculativeLoadHardeningImpl Impl;
2276 const bool Changed = Impl.run(MF);
2277 LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
2278 dbgs() << "\n"; MF.verify(MFAM));
2282}
2283
2284INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningLegacy, PASS_KEY,
2285 "X86 speculative load hardener", false, false)
2286INITIALIZE_PASS_END(X86SpeculativeLoadHardeningLegacy, PASS_KEY,
2287 "X86 speculative load hardener", false, false)
2288
2289 FunctionPass *llvm::createX86SpeculativeLoadHardeningLegacyPass() {
2290 return new X86SpeculativeLoadHardeningLegacy();
2291}