1 //====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 ///
10 /// Provide a pass which mitigates speculative execution attacks which operate
11 /// by speculating incorrectly past some predicate (a type check, bounds check,
12 /// or other condition) to reach a load with invalid inputs and leak the data
13 /// accessed by that load using a side channel out of the speculative domain.
14 ///
15 /// For details on the attacks, see the first variant in both the Project Zero
16 /// writeup and the Spectre paper:
17 /// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
18 /// https://spectreattack.com/spectre.pdf
19 ///
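/// As a rough, hypothetical illustration (not code from any particular
/// workload), the source pattern being defended against looks like:
///
///   if (idx < len)                  // mispredicted as taken when idx >= len
///     sink(table[secret[idx]]);     // speculative OOB load feeds a side channel
///
/// The pass traces an all-zeros/all-ones "predicate state" value along each
/// conditional edge and uses it to mask addresses (and optionally loaded
/// values) so that misspeculated loads cannot leak data.
///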
20 //===----------------------------------------------------------------------===//
21 
22 #include "X86.h"
23 #include "X86InstrBuilder.h"
24 #include "X86InstrInfo.h"
25 #include "X86Subtarget.h"
26 #include "llvm/ADT/ArrayRef.h"
27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/Optional.h"
29 #include "llvm/ADT/STLExtras.h"
30 #include "llvm/ADT/ScopeExit.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
50 #include "llvm/IR/DebugLoc.h"
51 #include "llvm/MC/MCSchedule.h"
52 #include "llvm/Pass.h"
54 #include "llvm/Support/Debug.h"
56 #include <algorithm>
57 #include <cassert>
58 #include <iterator>
59 #include <utility>
60 
61 using namespace llvm;
62 
63 #define PASS_KEY "x86-slh"
64 #define DEBUG_TYPE PASS_KEY
65 
66 STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
67 STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
68 STATISTIC(NumAddrRegsHardened,
69  "Number of address mode used registers hardaned");
70 STATISTIC(NumPostLoadRegsHardened,
71  "Number of post-load register values hardened");
72 STATISTIC(NumCallsOrJumpsHardened,
73  "Number of calls or jumps requiring extra hardening");
74 STATISTIC(NumInstsInserted, "Number of instructions inserted");
75 STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
76 
78  "x86-speculative-load-hardening",
79  cl::desc("Force enable speculative load hardening"), cl::init(false),
80  cl::Hidden);
81 
82 static cl::opt<bool> HardenEdgesWithLFENCE(
83  PASS_KEY "-lfence",
84  cl::desc(
85  "Use LFENCE along each conditional edge to harden against speculative "
86  "loads rather than conditional movs and poisoned pointers."),
87  cl::init(false), cl::Hidden);
88 
89 static cl::opt<bool> EnablePostLoadHardening(
90  PASS_KEY "-post-load",
91  cl::desc("Harden the value loaded *after* it is loaded by "
92  "flushing the loaded bits to 1. This is hard to do "
93  "in general but can be done easily for GPRs."),
94  cl::init(true), cl::Hidden);
95 
96 static cl::opt<bool> FenceCallAndRet(
97  PASS_KEY "-fence-call-and-ret",
98  cl::desc("Use a full speculation fence to harden both call and ret edges "
99  "rather than a lighter weight mitigation."),
100  cl::init(false), cl::Hidden);
101 
102 static cl::opt<bool> HardenInterprocedurally(
103  PASS_KEY "-ip",
104  cl::desc("Harden interprocedurally by passing our state in and out of "
105  "functions in the high bits of the stack pointer."),
106  cl::init(true), cl::Hidden);
107 
108 static cl::opt<bool>
109  HardenLoads(PASS_KEY "-loads",
110  cl::desc("Sanitize loads from memory. When disable, no "
111  "significant security is provided."),
112  cl::init(true), cl::Hidden);
113 
114 static cl::opt<bool> HardenIndirectCallsAndJumps(
115  PASS_KEY "-indirect",
116  cl::desc("Harden indirect calls and jumps against using speculatively "
117  "stored attacker controlled addresses. This is designed to "
118  "mitigate Spectre v1.2 style attacks."),
119  cl::init(true), cl::Hidden);
120 
121 namespace {
122 
123 class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
124 public:
125  X86SpeculativeLoadHardeningPass() : MachineFunctionPass(ID) { }
126 
127  StringRef getPassName() const override {
128  return "X86 speculative load hardening";
129  }
130  bool runOnMachineFunction(MachineFunction &MF) override;
131  void getAnalysisUsage(AnalysisUsage &AU) const override;
132 
133  /// Pass identification, replacement for typeid.
134  static char ID;
135 
136 private:
137  /// The information about a block's conditional terminators needed to trace
138  /// our predicate state through the exiting edges.
139  struct BlockCondInfo {
140  MachineBasicBlock *MBB;
141 
142  // We mostly have one conditional branch, and in extremely rare cases have
143  // two. Three and more are so rare as to be unimportant for compile time.
144  SmallVector<MachineInstr *, 2> CondBrs;
145 
146  MachineInstr *UncondBr;
147  };
148 
149  /// Manages the predicate state traced through the program.
150  struct PredState {
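  // InitialReg holds the incoming predicate state on function entry (zero, or
  // extracted from the caller's stack pointer when hardening
  // interprocedurally); PoisonReg holds the all-ones value used to poison the
  // state along misspeculated edges.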
151  unsigned InitialReg;
152  unsigned PoisonReg;
153 
154  const TargetRegisterClass *RC;
155  MachineSSAUpdater SSA;
156 
157  PredState(MachineFunction &MF, const TargetRegisterClass *RC)
158  : RC(RC), SSA(MF) {}
159  };
160 
161  const X86Subtarget *Subtarget;
162  MachineRegisterInfo *MRI;
163  const X86InstrInfo *TII;
164  const TargetRegisterInfo *TRI;
165 
166  Optional<PredState> PS;
167 
168  void hardenEdgesWithLFENCE(MachineFunction &MF);
169 
170  SmallVector<BlockCondInfo, 16> collectBlockCondInfo(MachineFunction &MF);
171 
172  SmallVector<MachineInstr *, 16>
173  tracePredStateThroughCFG(MachineFunction &MF, ArrayRef<BlockCondInfo> Infos);
174 
175  void unfoldCallAndJumpLoads(MachineFunction &MF);
176 
177  SmallVector<MachineInstr *, 16>
178  tracePredStateThroughIndirectBranches(MachineFunction &MF);
179 
180  void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);
181 
182  unsigned saveEFLAGS(MachineBasicBlock &MBB,
183  MachineBasicBlock::iterator InsertPt, DebugLoc Loc);
184  void restoreEFLAGS(MachineBasicBlock &MBB,
185  MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
186  unsigned OFReg);
187 
188  void mergePredStateIntoSP(MachineBasicBlock &MBB,
189  MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
190  unsigned PredStateReg);
191  unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
192  MachineBasicBlock::iterator InsertPt,
193  DebugLoc Loc);
194 
195  void
196  hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
197  MachineOperand &IndexMO,
198  SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
199  MachineInstr *
200  sinkPostLoadHardenedInst(MachineInstr &MI,
201  SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
202  bool canHardenRegister(unsigned Reg);
203  unsigned hardenValueInRegister(unsigned Reg, MachineBasicBlock &MBB,
204  MachineBasicBlock::iterator InsertPt,
205  DebugLoc Loc);
206  unsigned hardenPostLoad(MachineInstr &MI);
207  void hardenReturnInstr(MachineInstr &MI);
208  void tracePredStateThroughCall(MachineInstr &MI);
209  void hardenIndirectCallOrJumpInstr(
210  MachineInstr &MI,
211  SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
212 };
213 
214 } // end anonymous namespace
215 
216 char X86SpeculativeLoadHardeningPass::ID = 0;
217 
218 void X86SpeculativeLoadHardeningPass::getAnalysisUsage(
219  AnalysisUsage &AU) const {
220  MachineFunctionPass::getAnalysisUsage(AU);
221 }
222 
223 static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
224  MachineBasicBlock &Succ, int SuccCount,
225  MachineInstr *Br, MachineInstr *&UncondBr,
226  const X86InstrInfo &TII) {
227  assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");
228 
229  MachineFunction &MF = *MBB.getParent();
230 
231  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();
232 
233  // We have to insert the new block immediately after the current one as we
234  // don't know what layout-successor relationships the successor has and we
235  // may not be able to (and generally don't want to) try to fix those up.
236  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);
237 
238  // Update the branch instruction if necessary.
239  if (Br) {
240  assert(Br->getOperand(0).getMBB() == &Succ &&
241  "Didn't start with the right target!");
242  Br->getOperand(0).setMBB(&NewMBB);
243 
244  // If this successor was reached through a branch rather than fallthrough,
245  // we might have *broken* fallthrough and so need to inject a new
246  // unconditional branch.
247  if (!UncondBr) {
248  MachineBasicBlock &OldLayoutSucc =
249  *std::next(MachineFunction::iterator(&NewMBB));
250  assert(MBB.isSuccessor(&OldLayoutSucc) &&
251  "Without an unconditional branch, the old layout successor should "
252  "be an actual successor!");
253  auto BrBuilder =
254  BuildMI(&MBB, DebugLoc(), TII.get(X86::JMP_1)).addMBB(&OldLayoutSucc);
255  // Update the unconditional branch now that we've added one.
256  UncondBr = &*BrBuilder;
257  }
258 
259  // Insert unconditional "jump Succ" instruction in the new block if
260  // necessary.
261  if (!NewMBB.isLayoutSuccessor(&Succ)) {
262  SmallVector<MachineOperand, 4> Cond;
263  TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
264  }
265  } else {
266  assert(!UncondBr &&
267  "Cannot have a branchless successor and an unconditional branch!");
268  assert(NewMBB.isLayoutSuccessor(&Succ) &&
269  "A non-branch successor must have been a layout successor before "
270  "and now is a layout successor of the new block.");
271  }
272 
273  // If this is the only edge to the successor, we can just replace it in the
274  // CFG. Otherwise we need to add a new entry in the CFG for the new
275  // successor.
276  if (SuccCount == 1) {
277  MBB.replaceSuccessor(&Succ, &NewMBB);
278  } else {
279  MBB.splitSuccessor(&Succ, &NewMBB);
280  }
281 
282  // Hook up the edge from the new basic block to the old successor in the CFG.
283  NewMBB.addSuccessor(&Succ);
284 
285  // Fix PHI nodes in Succ so they refer to NewMBB instead of MBB.
286  for (MachineInstr &MI : Succ) {
287  if (!MI.isPHI())
288  break;
289  for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
290  OpIdx += 2) {
291  MachineOperand &OpV = MI.getOperand(OpIdx);
292  MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
293  assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
294  if (OpMBB.getMBB() != &MBB)
295  continue;
296 
297  // If this is the last edge to the successor, just replace MBB in the PHI.
298  if (SuccCount == 1) {
299  OpMBB.setMBB(&NewMBB);
300  break;
301  }
302 
303  // Otherwise, append a new pair of operands for the new incoming edge.
304  MI.addOperand(MF, OpV);
305  MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
306  break;
307  }
308  }
309 
310  // Inherit live-ins from the successor
311  for (auto &LI : Succ.liveins())
312  NewMBB.addLiveIn(LI);
313 
314  LLVM_DEBUG(dbgs() << " Split edge from '" << MBB.getName() << "' to '"
315  << Succ.getName() << "'.\n");
316  return NewMBB;
317 }
318 
319 /// Remove duplicate PHI operands to leave the PHI in a canonical and
320 /// predictable form.
321 ///
322 /// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
323 /// isn't what you might expect. We may have multiple entries in PHI nodes for
324 /// a single predecessor. This makes CFG-updating extremely complex, so here we
325 /// simplify all PHI nodes to a model even simpler than the IR's model: exactly
326 /// one entry per predecessor, regardless of how many edges there are.
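///
/// For example (hypothetical MIR), a PHI such as
///   %v = PHI %a, %bb.1, %b, %bb.1, %c, %bb.2
/// is rewritten so that %bb.1 appears only once:
///   %v = PHI %a, %bb.1, %c, %bb.2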
327 static void canonicalizePHIOperands(MachineFunction &MF) {
328  SmallPtrSet<MachineBasicBlock *, 4> Preds;
329  SmallVector<int, 4> DupIndices;
330  for (auto &MBB : MF)
331  for (auto &MI : MBB) {
332  if (!MI.isPHI())
333  break;
334 
335  // First we scan the operands of the PHI looking for duplicate entries
336  // for a particular predecessor. We retain the operand index of each duplicate
337  // entry found.
338  for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
339  OpIdx += 2)
340  if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
341  DupIndices.push_back(OpIdx);
342 
343  // Now walk the duplicate indices, removing both the block and value. Note
344  // that these are stored as a vector making this element-wise removal
346  // potentially quadratic.
347  //
348  // FIXME: It is really frustrating that we have to use a quadratic
349  // removal algorithm here. There should be a better way, but the use-def
350  // updates required make that impossible using the public API.
351  //
352  // Note that we have to process these backwards so that we don't
353  // invalidate other indices with each removal.
354  while (!DupIndices.empty()) {
355  int OpIdx = DupIndices.pop_back_val();
356  // Remove both the block and value operand, again in reverse order to
357  // preserve indices.
358  MI.RemoveOperand(OpIdx + 1);
359  MI.RemoveOperand(OpIdx);
360  }
361 
362  Preds.clear();
363  }
364 }
365 
366 /// Helper to scan a function for loads vulnerable to misspeculation that we
367 /// want to harden.
368 ///
369 /// We use this to avoid making changes to functions where there is nothing we
370 /// need to do to harden against misspeculation.
371 static bool hasVulnerableLoad(MachineFunction &MF) {
372  for (MachineBasicBlock &MBB : MF) {
373  for (MachineInstr &MI : MBB) {
374  // Loads within this basic block after an LFENCE are not at risk of
375  // speculatively executing with invalid predicates from prior control
376  // flow. So break out of this block but continue scanning the function.
377  if (MI.getOpcode() == X86::LFENCE)
378  break;
379 
380  // Looking for loads only.
381  if (!MI.mayLoad())
382  continue;
383 
384  // An MFENCE is modeled as a load but isn't vulnerable to misspeculation.
385  if (MI.getOpcode() == X86::MFENCE)
386  continue;
387 
388  // We found a load.
389  return true;
390  }
391  }
392 
393  // No loads found.
394  return false;
395 }
396 
397 bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
398  MachineFunction &MF) {
399  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
400  << " **********\n");
401 
402  // Only run if this pass is forced enabled or we detect the relevant function
403  // attribute requesting SLH.
404  if (!EnableSpeculativeLoadHardening &&
405  !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
406  return false;
407 
408  Subtarget = &MF.getSubtarget<X86Subtarget>();
409  MRI = &MF.getRegInfo();
410  TII = Subtarget->getInstrInfo();
411  TRI = Subtarget->getRegisterInfo();
412 
413  // FIXME: Support for 32-bit.
414  PS.emplace(MF, &X86::GR64_NOSPRegClass);
415 
416  if (MF.begin() == MF.end())
417  // Nothing to do for a degenerate empty function...
418  return false;
419 
420  // We support an alternative hardening technique based on a debug flag.
421  if (HardenEdgesWithLFENCE) {
422  hardenEdgesWithLFENCE(MF);
423  return true;
424  }
425 
426  // Create a dummy debug loc to use for all the generated code here.
427  DebugLoc Loc;
428 
429  MachineBasicBlock &Entry = *MF.begin();
430  auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());
431 
432  // Do a quick scan to see if we have any checkable loads.
433  bool HasVulnerableLoad = hasVulnerableLoad(MF);
434 
435  // See if we have any conditional branching blocks that we will need to trace
436  // predicate state through.
437  SmallVector<BlockCondInfo, 16> Infos = collectBlockCondInfo(MF);
438 
439  // If we have no interesting conditions or loads, nothing to do here.
440  if (!HasVulnerableLoad && Infos.empty())
441  return true;
442 
443  // The poison value is required to be an all-ones value for many aspects of
444  // this mitigation.
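  // (In particular, OR-ing an all-ones state into a hardened pointer turns it
  // into an invalid, non-canonical address, and OR-ing it into a loaded value
  // flushes the bits to 1, so misspeculated loads cannot reveal real data.)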
445  const int PoisonVal = -1;
446  PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
447  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
448  .addImm(PoisonVal);
449  ++NumInstsInserted;
450 
451  // If we have loads being hardened and we've asked for call and ret edges to
452  // get a full fence-based mitigation, inject that fence.
453  if (HasVulnerableLoad && FenceCallAndRet) {
454  // We need to insert an LFENCE at the start of the function to suspend any
455  // incoming misspeculation from the caller. This helps two-fold: the caller
456  // may not have been protected as this code has been, and this code gets to
457  // not take any specific action to protect across calls.
458  // FIXME: We could skip this for functions which unconditionally return
459  // a constant.
460  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
461  ++NumInstsInserted;
462  ++NumLFENCEsInserted;
463  }
464 
465  // If we guarded the entry with an LFENCE and have no conditionals to protect
466  // in blocks, then we're done.
467  if (FenceCallAndRet && Infos.empty())
468  // We may have changed the function's code at this point to insert fences.
469  return true;
470 
471  // For every basic block in the function which can b
472  if (HardenInterprocedurally && !FenceCallAndRet) {
473  // Set up the predicate state by extracting it from the incoming stack
474  // pointer so we pick up any misspeculation in our caller.
475  PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
476  } else {
477  // Otherwise, just build the predicate state itself by zeroing a register
478  // as we don't need any initial state.
479  PS->InitialReg = MRI->createVirtualRegister(PS->RC);
480  Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
481  auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
482  PredStateSubReg);
483  ++NumInstsInserted;
484  MachineOperand *ZeroEFLAGSDefOp =
485  ZeroI->findRegisterDefOperand(X86::EFLAGS);
486  assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
487  "Must have an implicit def of EFLAGS!");
488  ZeroEFLAGSDefOp->setIsDead(true);
489  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
490  PS->InitialReg)
491  .addImm(0)
492  .addReg(PredStateSubReg)
493  .addImm(X86::sub_32bit);
494  }
495 
496  // We're going to need to trace predicate state throughout the function's
497  // CFG. Prepare for this by setting up our initial state of PHIs with unique
498  // predecessor entries and all the initial predicate state.
499  canonicalizePHIOperands(MF);
500 
501  // Track the updated values in an SSA updater to rewrite into SSA form at the
502  // end.
503  PS->SSA.Initialize(PS->InitialReg);
504  PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
505 
506  // Trace through the CFG.
507  auto CMovs = tracePredStateThroughCFG(MF, Infos);
508 
509  // We may also enter basic blocks in this function via exception handling
510  // control flow. Here, if we are hardening interprocedurally, we need to
511  // re-capture the predicate state from the throwing code. In the Itanium ABI,
512  // the throw will always look like a call to __cxa_throw and will have the
513  // predicate state in the stack pointer, so extract fresh predicate state from
514  // the stack pointer and make it available in SSA.
515  // FIXME: Handle non-itanium ABI EH models.
516  if (HardenInterprocedurally) {
517  for (MachineBasicBlock &MBB : MF) {
518  assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
519  assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
520  assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");
521  if (!MBB.isEHPad())
522  continue;
523  PS->SSA.AddAvailableValue(
524  &MBB,
525  extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));
526  }
527  }
528 
529  if (HardenIndirectCallsAndJumps) {
530  // If we are going to harden calls and jumps we need to unfold their memory
531  // operands.
532  unfoldCallAndJumpLoads(MF);
533 
534  // Then we trace predicate state through the indirect branches.
535  auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
536  CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());
537  }
538 
539  // Now that we have the predicate state available at the start of each block
540  // in the CFG, trace it through each block, hardening vulnerable instructions
541  // as we go.
542  tracePredStateThroughBlocksAndHarden(MF);
543 
544  // Now rewrite all the uses of the pred state using the SSA updater to insert
545  // PHIs connecting the state between blocks along the CFG edges.
546  for (MachineInstr *CMovI : CMovs)
547  for (MachineOperand &Op : CMovI->operands()) {
548  if (!Op.isReg() || Op.getReg() != PS->InitialReg)
549  continue;
550 
551  PS->SSA.RewriteUse(Op);
552  }
553 
554  LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
555  dbgs() << "\n"; MF.verify(this));
556  return true;
557 }
558 
559 /// Implements the naive hardening approach of putting an LFENCE after every
560 /// potentially mis-predicted control flow construct.
561 ///
562 /// We include this as an alternative mostly for the purpose of comparison. The
563 /// performance impact of this is expected to be extremely severe and not
564 /// practical for any real-world users.
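///
/// For example (a hypothetical sketch), with a conditional edge such as:
///
///   jne .Ltarget
///   ...
/// .Ltarget:
///   movq (%rdi), %rax
///
/// this approach simply prepends a fence to the successor block:
///
/// .Ltarget:
///   lfence                  # speculation cannot proceed past this point
///   movq (%rdi), %rax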
565 void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(
566  MachineFunction &MF) {
567  // First, we scan the function looking for blocks that are reached along edges
568  // that we might want to harden.
569  SmallSetVector<MachineBasicBlock *, 8> Blocks;
570  for (MachineBasicBlock &MBB : MF) {
571  // If there are no or only one successor, nothing to do here.
572  if (MBB.succ_size() <= 1)
573  continue;
574 
575  // Skip blocks unless their terminators start with a branch. Other
576  // terminators don't seem interesting for guarding against misspeculation.
577  auto TermIt = MBB.getFirstTerminator();
578  if (TermIt == MBB.end() || !TermIt->isBranch())
579  continue;
580 
581  // Add all the non-EH-pad successors to the blocks we want to harden. We
582  // skip EH pads because there isn't really a condition of interest on
583  // entering.
584  for (MachineBasicBlock *SuccMBB : MBB.successors())
585  if (!SuccMBB->isEHPad())
586  Blocks.insert(SuccMBB);
587  }
588 
589  for (MachineBasicBlock *MBB : Blocks) {
590  auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
591  BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
592  ++NumInstsInserted;
593  ++NumLFENCEsInserted;
594  }
595 }
596 
597 SmallVector<X86SpeculativeLoadHardeningPass::BlockCondInfo, 16>
598 X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {
599  SmallVector<BlockCondInfo, 16> Infos;
600 
601  // Walk the function and build up a summary for each block's conditions that
602  // we need to trace through.
603  for (MachineBasicBlock &MBB : MF) {
604  // If there are no or only one successor, nothing to do here.
605  if (MBB.succ_size() <= 1)
606  continue;
607 
608  // We want to reliably handle any conditional branch terminators in the
609  // MBB, so we manually analyze the branch. We can handle all of the
610  // permutations here, including ones that analyze branch cannot.
611  //
612  // The approach is to walk backwards across the terminators, resetting at
613  // any unconditional non-indirect branch, and track all conditional edges
614  // to basic blocks as well as the fallthrough or unconditional successor
615  // edge. For each conditional edge, we track the target and the opposite
616  // condition code in order to inject a "no-op" cmov into that successor
617  // that will harden the predicate. For the fallthrough/unconditional
618  // edge, we inject a separate cmov for each conditional branch with
619  // matching condition codes. This effectively implements an "and" of the
620  // condition flags, even if there isn't a single condition flag that would
621  // directly implement that. We don't bother trying to optimize either of
622  // these cases because if such an optimization is possible, LLVM should
623  // have optimized the conditional *branches* in that way already to reduce
624  // instruction count. This late, we simply assume the minimal number of
625  // branch instructions is being emitted and use that to guide our cmov
626  // insertion.
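    //
    // For example (hypothetical), for the terminator sequence:
    //   jne %bb.then
    //   jmp %bb.else
    // we will later insert a CMOVE (the inverse of NE) of the poison value
    // along the edge to %bb.then and a CMOVNE along the edge to %bb.else, so
    // whichever direction is misspeculated sees an all-ones predicate state.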
627 
628  BlockCondInfo Info = {&MBB, {}, nullptr};
629 
630  // Now walk backwards through the terminators and build up successors they
631  // reach and the conditions.
632  for (MachineInstr &MI : llvm::reverse(MBB)) {
633  // Once we've handled all the terminators, we're done.
634  if (!MI.isTerminator())
635  break;
636 
637  // If we see a non-branch terminator, we can't handle anything so bail.
638  if (!MI.isBranch()) {
639  Info.CondBrs.clear();
640  break;
641  }
642 
643  // If we see an unconditional branch, reset our state, clear any
644  // fallthrough, and set this is the "else" successor.
645  if (MI.getOpcode() == X86::JMP_1) {
646  Info.CondBrs.clear();
647  Info.UncondBr = &MI;
648  continue;
649  }
650 
651  // If we get an invalid condition, we have an indirect branch or some
652  // other unanalyzable "fallthrough" case. We model this as a nullptr for
653  // the destination so we can still guard any conditional successors.
654  // Consider code sequences like:
655  // ```
656  // jCC L1
657  // jmpq *%rax
658  // ```
659  // We still want to harden the edge to `L1`.
660  if (X86::getCondFromBranch(MI) == X86::COND_INVALID) {
661  Info.CondBrs.clear();
662  Info.UncondBr = &MI;
663  continue;
664  }
665 
666  // We have a vanilla conditional branch, add it to our list.
667  Info.CondBrs.push_back(&MI);
668  }
669  if (Info.CondBrs.empty()) {
670  ++NumBranchesUntraced;
671  LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
672  MBB.dump());
673  continue;
674  }
675 
676  Infos.push_back(Info);
677  }
678 
679  return Infos;
680 }
681 
682 /// Trace the predicate state through the CFG, instrumenting each conditional
683 /// branch such that misspeculation through an edge will poison the predicate
684 /// state.
685 ///
686 /// Returns the list of inserted CMov instructions so that they can have their
687 /// uses of the predicate state rewritten into proper SSA form once it is
688 /// complete.
689 SmallVector<MachineInstr *, 16>
690 X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
691  MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
692  // Collect the inserted cmov instructions so we can rewrite their uses of the
693  // predicate state into SSA form.
694  SmallVector<MachineInstr *, 16> CMovs;
695 
696  // Now walk all of the basic blocks looking for ones that end in conditional
697  // jumps where we need to update this register along each edge.
698  for (const BlockCondInfo &Info : Infos) {
699  MachineBasicBlock &MBB = *Info.MBB;
700  const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
701  MachineInstr *UncondBr = Info.UncondBr;
702 
703  LLVM_DEBUG(dbgs() << "Tracing predicate through block: " << MBB.getName()
704  << "\n");
705  ++NumCondBranchesTraced;
706 
707  // Compute the non-conditional successor as either the target of any
708  // unconditional branch or the layout successor.
709  MachineBasicBlock *UncondSucc =
710  UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
711  ? UncondBr->getOperand(0).getMBB()
712  : nullptr)
713  : &*std::next(MachineFunction::iterator(&MBB));
714 
715  // Count how many edges there are to any given successor.
716  SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
717  if (UncondSucc)
718  ++SuccCounts[UncondSucc];
719  for (auto *CondBr : CondBrs)
720  ++SuccCounts[CondBr->getOperand(0).getMBB()];
721 
722  // A lambda to insert cmov instructions into a block checking all of the
723  // condition codes in a sequence.
724  auto BuildCheckingBlockForSuccAndConds =
725  [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
726  MachineInstr *Br, MachineInstr *&UncondBr,
727  ArrayRef<X86::CondCode> Conds) {
728  // First, we split the edge to insert the checking block into a safe
729  // location.
730  auto &CheckingMBB =
731  (SuccCount == 1 && Succ.pred_size() == 1)
732  ? Succ
733  : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);
734 
735  bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
736  if (!LiveEFLAGS)
737  CheckingMBB.addLiveIn(X86::EFLAGS);
738 
739  // Now insert the cmovs to implement the checks.
740  auto InsertPt = CheckingMBB.begin();
741  assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
742  "Should never have a PHI in the initial checking block as it "
743  "always has a single predecessor!");
744 
745  // We will wire each cmov to each other, but need to start with the
746  // incoming pred state.
747  unsigned CurStateReg = PS->InitialReg;
748 
749  for (X86::CondCode Cond : Conds) {
750  int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
751  auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
752 
753  Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
754  // Note that we intentionally use an empty debug location so that
755  // this picks up the preceding location.
756  auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
757  TII->get(CMovOp), UpdatedStateReg)
758  .addReg(CurStateReg)
759  .addReg(PS->PoisonReg)
760  .addImm(Cond);
761  // If this is the last cmov and the EFLAGS weren't originally
762  // live-in, mark them as killed.
763  if (!LiveEFLAGS && Cond == Conds.back())
764  CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
765 
766  ++NumInstsInserted;
767  LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump();
768  dbgs() << "\n");
769 
770  // The first one of the cmovs will be using the top level
771  // `PredStateReg` and need to get rewritten into SSA form.
772  if (CurStateReg == PS->InitialReg)
773  CMovs.push_back(&*CMovI);
774 
775  // The next cmov should start from this one's def.
776  CurStateReg = UpdatedStateReg;
777  }
778 
779  // And put the last one into the available values for SSA form of our
780  // predicate state.
781  PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
782  };
783 
784  std::vector<X86::CondCode> UncondCodeSeq;
785  for (auto *CondBr : CondBrs) {
786  MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
787  int &SuccCount = SuccCounts[&Succ];
788 
789  X86::CondCode Cond = X86::getCondFromBranch(*CondBr);
790  X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
791  UncondCodeSeq.push_back(Cond);
792 
793  BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
794  {InvCond});
795 
796  // Decrement the successor count now that we've split one of the edges.
797  // We need to keep the count of edges to the successor accurate in order
798  // to know above when to *replace* the successor in the CFG vs. just
799  // adding the new successor.
800  --SuccCount;
801  }
802 
803  // Since we may have split edges and changed the number of successors,
804  // normalize the probabilities. This avoids doing it each time we split an
805  // edge.
806  MBB.normalizeSuccProbs();
807 
808  // Finally, we need to insert cmovs into the "fallthrough" edge. Here, we
809  // need to intersect the other condition codes. We can do this by just
810  // doing a cmov for each one.
811  if (!UncondSucc)
812  // If we have no fallthrough to protect (perhaps it is an indirect jump?)
813  // just skip this and continue.
814  continue;
815 
816  assert(SuccCounts[UncondSucc] == 1 &&
817  "We should never have more than one edge to the unconditional "
818  "successor at this point because every other edge must have been "
819  "split above!");
820 
821  // Sort and unique the codes to minimize them.
822  llvm::sort(UncondCodeSeq);
823  UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
824  UncondCodeSeq.end());
825 
826  // Build a checking version of the successor.
827  BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
828  UncondBr, UncondBr, UncondCodeSeq);
829  }
830 
831  return CMovs;
832 }
833 
834 /// Compute the register class for the unfolded load.
835 ///
836 /// FIXME: This should probably live in X86InstrInfo, potentially by adding
837 /// a way to unfold into a newly created vreg rather than requiring a register
838 /// input.
839 static const TargetRegisterClass *
840 getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
841  unsigned Opcode) {
842  unsigned Index;
843  unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
844  Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
845  const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
846  return TII.getRegClass(MCID, Index, &TII.getRegisterInfo(), MF);
847 }
848 
849 void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
850  MachineFunction &MF) {
851  for (MachineBasicBlock &MBB : MF)
852  for (auto MII = MBB.instr_begin(), MIE = MBB.instr_end(); MII != MIE;) {
853  // Grab a reference and increment the iterator so we can remove this
854  // instruction if needed without disturbing the iteration.
855  MachineInstr &MI = *MII++;
856 
857  // Must either be a call or a branch.
858  if (!MI.isCall() && !MI.isBranch())
859  continue;
860  // We only care about loading variants of these instructions.
861  if (!MI.mayLoad())
862  continue;
863 
864  switch (MI.getOpcode()) {
865  default: {
866  LLVM_DEBUG(
867  dbgs() << "ERROR: Found an unexpected loading branch or call "
868  "instruction:\n";
869  MI.dump(); dbgs() << "\n");
870  report_fatal_error("Unexpected loading branch or call!");
871  }
872 
873  case X86::FARCALL16m:
874  case X86::FARCALL32m:
875  case X86::FARCALL64:
876  case X86::FARJMP16m:
877  case X86::FARJMP32m:
878  case X86::FARJMP64:
879  // We cannot mitigate far jumps or calls, but we also don't expect them
880  // to be vulnerable to Spectre v1.2 style attacks.
881  continue;
882 
883  case X86::CALL16m:
884  case X86::CALL16m_NT:
885  case X86::CALL32m:
886  case X86::CALL32m_NT:
887  case X86::CALL64m:
888  case X86::CALL64m_NT:
889  case X86::JMP16m:
890  case X86::JMP16m_NT:
891  case X86::JMP32m:
892  case X86::JMP32m_NT:
893  case X86::JMP64m:
894  case X86::JMP64m_NT:
895  case X86::TAILJMPm64:
896  case X86::TAILJMPm64_REX:
897  case X86::TAILJMPm:
898  case X86::TCRETURNmi64:
899  case X86::TCRETURNmi: {
900  // Use the generic unfold logic now that we know we're dealing with
901  // expected instructions.
902  // FIXME: We don't have test coverage for all of these!
903  auto *UnfoldedRC = getRegClassForUnfoldedLoad(MF, *TII, MI.getOpcode());
904  if (!UnfoldedRC) {
905  LLVM_DEBUG(dbgs()
906  << "ERROR: Unable to unfold load from instruction:\n";
907  MI.dump(); dbgs() << "\n");
908  report_fatal_error("Unable to unfold load!");
909  }
910  Register Reg = MRI->createVirtualRegister(UnfoldedRC);
911  SmallVector<MachineInstr *, 2> NewMIs;
912  // If we were able to compute an unfolded reg class, any failure here
913  // is just a programming error so just assert.
914  bool Unfolded =
915  TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
916  /*UnfoldStore*/ false, NewMIs);
917  (void)Unfolded;
918  assert(Unfolded &&
919  "Computed unfolded register class but failed to unfold");
920  // Now stitch the new instructions into place and erase the old one.
921  for (auto *NewMI : NewMIs)
922  MBB.insert(MI.getIterator(), NewMI);
923  MI.eraseFromParent();
924  LLVM_DEBUG({
925  dbgs() << "Unfolded load successfully into:\n";
926  for (auto *NewMI : NewMIs) {
927  NewMI->dump();
928  dbgs() << "\n";
929  }
930  });
931  continue;
932  }
933  }
934  llvm_unreachable("Escaped switch with default!");
935  }
936 }
937 
938 /// Trace the predicate state through indirect branches, instrumenting them to
939 /// poison the state if a target is reached that does not match the expected
940 /// target.
941 ///
942 /// This is designed to mitigate Spectre variant 1 attacks where an indirect
943 /// branch is trained to predict a particular target and then mispredicts that
944 /// target in a way that can leak data. Despite using an indirect branch, this
945 /// is really a variant 1 style attack: it does not steer execution to an
946 /// arbitrary or attacker controlled address, and it does not require any
947 /// special code executing next to the victim. This attack can also be mitigated
948 /// through retpolines, but those require either replacing indirect branches
949 /// with conditional direct branches or lowering them through a device that
950 /// blocks speculation. This mitigation can replace these retpoline-style
951 /// mitigations for jump tables and other indirect branches within a function
952 /// when variant 2 isn't a risk while allowing limited speculation. Indirect
953 /// calls, however, cannot be mitigated through this technique without changing
954 /// the ABI in a fundamental way.
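///
/// Roughly (a hypothetical sketch for the small, non-PIC code model), each
/// block reachable via an indirect branch is prefixed with:
///
///   cmpq $.Lthis_block, %target    # compare against the expected target
///   cmovneq %poison, %pred_state   # poison the state on a mismatched target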
955 SmallVector<MachineInstr *, 16>
956 X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
957  MachineFunction &MF) {
958  // We use the SSAUpdater to insert PHI nodes for the target addresses of
959  // indirect branches. We don't actually need the full power of the SSA updater
960  // in this particular case as we always have immediately available values, but
961  // this avoids us having to re-implement the PHI construction logic.
962  MachineSSAUpdater TargetAddrSSA(MF);
963  TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass));
964 
965  // Track which blocks were terminated with an indirect branch.
966  SmallPtrSet<MachineBasicBlock *, 4> IndirectTerminatedMBBs;
967 
968  // We need to know what blocks end up reached via indirect branches. We
969  // expect this to be a subset of those whose address is taken and so track it
970  // directly via the CFG.
971  SmallPtrSet<MachineBasicBlock *, 4> IndirectTargetMBBs;
972 
973  // Walk all the blocks which end in an indirect branch and make the
974  // target address available.
975  for (MachineBasicBlock &MBB : MF) {
976  // Find the last terminator.
977  auto MII = MBB.instr_rbegin();
978  while (MII != MBB.instr_rend() && MII->isDebugInstr())
979  ++MII;
980  if (MII == MBB.instr_rend())
981  continue;
982  MachineInstr &TI = *MII;
983  if (!TI.isTerminator() || !TI.isBranch())
984  // No terminator or non-branch terminator.
985  continue;
986 
987  unsigned TargetReg;
988 
989  switch (TI.getOpcode()) {
990  default:
991  // Direct branch or conditional branch (leading to fallthrough).
992  continue;
993 
994  case X86::FARJMP16m:
995  case X86::FARJMP32m:
996  case X86::FARJMP64:
997  // We cannot mitigate far jumps or calls, but we also don't expect them
998  // to be vulnerable to Spectre v1.2 or v2 (self trained) style attacks.
999  continue;
1000 
1001  case X86::JMP16m:
1002  case X86::JMP16m_NT:
1003  case X86::JMP32m:
1004  case X86::JMP32m_NT:
1005  case X86::JMP64m:
1006  case X86::JMP64m_NT:
1007  // Mostly as documentation.
1008  report_fatal_error("Memory operand jumps should have been unfolded!");
1009 
1010  case X86::JMP16r:
1011  report_fatal_error(
1012  "Support for 16-bit indirect branches is not implemented.");
1013  case X86::JMP32r:
1014  report_fatal_error(
1015  "Support for 32-bit indirect branches is not implemented.");
1016 
1017  case X86::JMP64r:
1018  TargetReg = TI.getOperand(0).getReg();
1019  }
1020 
1021  // We have definitely found an indirect branch. Verify that there are no
1022  // preceding conditional branches as we don't yet support that.
1023  if (llvm::any_of(MBB.terminators(), [&](MachineInstr &OtherTI) {
1024  return !OtherTI.isDebugInstr() && &OtherTI != &TI;
1025  })) {
1026  LLVM_DEBUG({
1027  dbgs() << "ERROR: Found other terminators in a block with an indirect "
1028  "branch! This is not yet supported! Terminator sequence:\n";
1029  for (MachineInstr &MI : MBB.terminators()) {
1030  MI.dump();
1031  dbgs() << '\n';
1032  }
1033  });
1034  report_fatal_error("Unimplemented terminator sequence!");
1035  }
1036 
1037  // Make the target register an available value for this block.
1038  TargetAddrSSA.AddAvailableValue(&MBB, TargetReg);
1039  IndirectTerminatedMBBs.insert(&MBB);
1040 
1041  // Add all the successors to our target candidates.
1042  for (MachineBasicBlock *Succ : MBB.successors())
1043  IndirectTargetMBBs.insert(Succ);
1044  }
1045 
1046  // Keep track of the cmov instructions we insert so we can return them.
1047  SmallVector<MachineInstr *, 16> CMovs;
1048 
1049  // If we didn't find any indirect branches with targets, nothing to do here.
1050  if (IndirectTargetMBBs.empty())
1051  return CMovs;
1052 
1053  // We found indirect branches and targets that need to be instrumented to
1054  // harden loads within them. Walk the blocks of the function (to get a stable
1055  // ordering) and instrument each target of an indirect branch.
1056  for (MachineBasicBlock &MBB : MF) {
1057  // Skip the blocks that aren't candidate targets.
1058  if (!IndirectTargetMBBs.count(&MBB))
1059  continue;
1060 
1061  // We don't expect EH pads to ever be reached via an indirect branch. If
1062  // this is desired for some reason, we could simply skip them here rather
1063  // than asserting.
1064  assert(!MBB.isEHPad() &&
1065  "Unexpected EH pad as target of an indirect branch!");
1066 
1067  // We should never end up threading EFLAGS into a block to harden
1068  // conditional jumps as there would be an additional successor via the
1069  // indirect branch. As a consequence, all such edges would be split before
1070  // reaching here, and the inserted block will handle the EFLAGS-based
1071  // hardening.
1072  assert(!MBB.isLiveIn(X86::EFLAGS) &&
1073  "Cannot check within a block that already has live-in EFLAGS!");
1074 
1075  // We can't handle having non-indirect edges into this block unless this is
1076  // the only successor and we can synthesize the necessary target address.
1077  for (MachineBasicBlock *Pred : MBB.predecessors()) {
1078  // If we've already handled this by extracting the target directly,
1079  // nothing to do.
1080  if (IndirectTerminatedMBBs.count(Pred))
1081  continue;
1082 
1083  // Otherwise, we have to be the only successor. We generally expect this
1084  // to be true as conditional branches should have had a critical edge
1085  // split already. We don't however need to worry about EH pad successors
1086  // as they'll happily ignore the target and their hardening strategy is
1087  // resilient to all ways in which they could be reached speculatively.
1088  if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
1089  return Succ->isEHPad() || Succ == &MBB;
1090  })) {
1091  LLVM_DEBUG({
1092  dbgs() << "ERROR: Found conditional entry to target of indirect "
1093  "branch!\n";
1094  Pred->dump();
1095  MBB.dump();
1096  });
1097  report_fatal_error("Cannot harden a conditional entry to a target of "
1098  "an indirect branch!");
1099  }
1100 
1101  // Now we need to compute the address of this block and install it as a
1102  // synthetic target in the predecessor. We do this at the bottom of the
1103  // predecessor.
1104  auto InsertPt = Pred->getFirstTerminator();
1105  Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1106  if (MF.getTarget().getCodeModel() == CodeModel::Small &&
1107  !Subtarget->isPositionIndependent()) {
1108  // Directly materialize it into an immediate.
1109  auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(),
1110  TII->get(X86::MOV64ri32), TargetReg)
1111  .addMBB(&MBB);
1112  ++NumInstsInserted;
1113  (void)AddrI;
1114  LLVM_DEBUG(dbgs() << " Inserting mov: "; AddrI->dump();
1115  dbgs() << "\n");
1116  } else {
1117  auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::LEA64r),
1118  TargetReg)
1119  .addReg(/*Base*/ X86::RIP)
1120  .addImm(/*Scale*/ 1)
1121  .addReg(/*Index*/ 0)
1122  .addMBB(&MBB)
1123  .addReg(/*Segment*/ 0);
1124  ++NumInstsInserted;
1125  (void)AddrI;
1126  LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump();
1127  dbgs() << "\n");
1128  }
1129  // And make this available.
1130  TargetAddrSSA.AddAvailableValue(Pred, TargetReg);
1131  }
1132 
1133  // Materialize the needed SSA value of the target. Note that we need the
1134  // middle of the block as this block might at the bottom have an indirect
1135  // branch back to itself. We can do this here because at this point, every
1136  // predecessor of this block has an available value. This is basically just
1137  // automating the construction of a PHI node for this target.
1138  unsigned TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
1139 
1140  // Insert a comparison of the incoming target register with this block's
1141  // address. This also requires us to mark the block as having its address
1142  // taken explicitly.
1143  MBB.setHasAddressTaken();
1144  auto InsertPt = MBB.SkipPHIsLabelsAndDebug(MBB.begin());
1145  if (MF.getTarget().getCodeModel() == CodeModel::Small &&
1146  !Subtarget->isPositionIndependent()) {
1147  // Check directly against a relocated immediate when we can.
1148  auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64ri32))
1149  .addReg(TargetReg, RegState::Kill)
1150  .addMBB(&MBB);
1151  ++NumInstsInserted;
1152  (void)CheckI;
1153  LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1154  } else {
1155  // Otherwise compute the address into a register first.
1156  Register AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1157  auto AddrI =
1158  BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg)
1159  .addReg(/*Base*/ X86::RIP)
1160  .addImm(/*Scale*/ 1)
1161  .addReg(/*Index*/ 0)
1162  .addMBB(&MBB)
1163  .addReg(/*Segment*/ 0);
1164  ++NumInstsInserted;
1165  (void)AddrI;
1166  LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump(); dbgs() << "\n");
1167  auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
1168  .addReg(TargetReg, RegState::Kill)
1169  .addReg(AddrReg, RegState::Kill);
1170  ++NumInstsInserted;
1171  (void)CheckI;
1172  LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1173  }
1174 
1175  // Now cmov over the predicate if the comparison wasn't equal.
1176  int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
1177  auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
1178  Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
1179  auto CMovI =
1180  BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
1181  .addReg(PS->InitialReg)
1182  .addReg(PS->PoisonReg)
1183  .addImm(X86::COND_NE);
1184  CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
1185  ++NumInstsInserted;
1186  LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
1187  CMovs.push_back(&*CMovI);
1188 
1189  // And put the new value into the available values for SSA form of our
1190  // predicate state.
1191  PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
1192  }
1193 
1194  // Return all the newly inserted cmov instructions of the predicate state.
1195  return CMovs;
1196 }
1197 
1198 /// Returns true if the instruction has no behavior (specified or otherwise)
1199 /// that is based on the value of any of its register operands
1200 ///
1201 /// A classical example of something that is inherently not data invariant is an
1202 /// indirect jump -- the destination is loaded into icache based on the bits set
1203 /// in the jump destination register.
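///
/// For example, `ADD64rr` executes in time independent of the values in its
/// register operands and is treated as data invariant below, while something
/// like a division is not, since its latency varies with the operand values.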
1204 ///
1205 /// FIXME: This should become part of our instruction tables.
1206 static bool isDataInvariant(MachineInstr &MI) {
1207  switch (MI.getOpcode()) {
1208  default:
1209  // By default, assume that the instruction is not data invariant.
1210  return false;
1211 
1212  // Some target-independent operations that trivially lower to data-invariant
1213  // instructions.
1214  case TargetOpcode::COPY:
1215  case TargetOpcode::INSERT_SUBREG:
1216  case TargetOpcode::SUBREG_TO_REG:
1217  return true;
1218 
1219  // On x86 it is believed that imul is constant time w.r.t. the loaded data.
1220  // However, they set flags and are perhaps the most surprisingly constant
1221  // time operations so we call them out here separately.
1222  case X86::IMUL16rr:
1223  case X86::IMUL16rri8:
1224  case X86::IMUL16rri:
1225  case X86::IMUL32rr:
1226  case X86::IMUL32rri8:
1227  case X86::IMUL32rri:
1228  case X86::IMUL64rr:
1229  case X86::IMUL64rri32:
1230  case X86::IMUL64rri8:
1231 
1232  // Bit scanning and counting instructions that are somewhat surprisingly
1233  // constant time as they scan across bits and do other fairly complex
1234  // operations like popcnt, but are believed to be constant time on x86.
1235  // However, these set flags.
1236  case X86::BSF16rr:
1237  case X86::BSF32rr:
1238  case X86::BSF64rr:
1239  case X86::BSR16rr:
1240  case X86::BSR32rr:
1241  case X86::BSR64rr:
1242  case X86::LZCNT16rr:
1243  case X86::LZCNT32rr:
1244  case X86::LZCNT64rr:
1245  case X86::POPCNT16rr:
1246  case X86::POPCNT32rr:
1247  case X86::POPCNT64rr:
1248  case X86::TZCNT16rr:
1249  case X86::TZCNT32rr:
1250  case X86::TZCNT64rr:
1251 
1252  // Bit manipulation instructions are effectively combinations of basic
1253  // arithmetic ops, and should still execute in constant time. These also
1254  // set flags.
1255  case X86::BLCFILL32rr:
1256  case X86::BLCFILL64rr:
1257  case X86::BLCI32rr:
1258  case X86::BLCI64rr:
1259  case X86::BLCIC32rr:
1260  case X86::BLCIC64rr:
1261  case X86::BLCMSK32rr:
1262  case X86::BLCMSK64rr:
1263  case X86::BLCS32rr:
1264  case X86::BLCS64rr:
1265  case X86::BLSFILL32rr:
1266  case X86::BLSFILL64rr:
1267  case X86::BLSI32rr:
1268  case X86::BLSI64rr:
1269  case X86::BLSIC32rr:
1270  case X86::BLSIC64rr:
1271  case X86::BLSMSK32rr:
1272  case X86::BLSMSK64rr:
1273  case X86::BLSR32rr:
1274  case X86::BLSR64rr:
1275  case X86::TZMSK32rr:
1276  case X86::TZMSK64rr:
1277 
1278  // Bit extracting and clearing instructions should execute in constant time,
1279  // and set flags.
1280  case X86::BEXTR32rr:
1281  case X86::BEXTR64rr:
1282  case X86::BEXTRI32ri:
1283  case X86::BEXTRI64ri:
1284  case X86::BZHI32rr:
1285  case X86::BZHI64rr:
1286 
1287  // Shift and rotate.
1288  case X86::ROL8r1: case X86::ROL16r1: case X86::ROL32r1: case X86::ROL64r1:
1289  case X86::ROL8rCL: case X86::ROL16rCL: case X86::ROL32rCL: case X86::ROL64rCL:
1290  case X86::ROL8ri: case X86::ROL16ri: case X86::ROL32ri: case X86::ROL64ri:
1291  case X86::ROR8r1: case X86::ROR16r1: case X86::ROR32r1: case X86::ROR64r1:
1292  case X86::ROR8rCL: case X86::ROR16rCL: case X86::ROR32rCL: case X86::ROR64rCL:
1293  case X86::ROR8ri: case X86::ROR16ri: case X86::ROR32ri: case X86::ROR64ri:
1294  case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1: case X86::SAR64r1:
1295  case X86::SAR8rCL: case X86::SAR16rCL: case X86::SAR32rCL: case X86::SAR64rCL:
1296  case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
1297  case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1: case X86::SHL64r1:
1298  case X86::SHL8rCL: case X86::SHL16rCL: case X86::SHL32rCL: case X86::SHL64rCL:
1299  case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri:
1300  case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1: case X86::SHR64r1:
1301  case X86::SHR8rCL: case X86::SHR16rCL: case X86::SHR32rCL: case X86::SHR64rCL:
1302  case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
1303  case X86::SHLD16rrCL: case X86::SHLD32rrCL: case X86::SHLD64rrCL:
1304  case X86::SHLD16rri8: case X86::SHLD32rri8: case X86::SHLD64rri8:
1305  case X86::SHRD16rrCL: case X86::SHRD32rrCL: case X86::SHRD64rrCL:
1306  case X86::SHRD16rri8: case X86::SHRD32rri8: case X86::SHRD64rri8:
1307 
1308  // Basic arithmetic is constant time on the input but does set flags.
1309  case X86::ADC8rr: case X86::ADC8ri:
1310  case X86::ADC16rr: case X86::ADC16ri: case X86::ADC16ri8:
1311  case X86::ADC32rr: case X86::ADC32ri: case X86::ADC32ri8:
1312  case X86::ADC64rr: case X86::ADC64ri8: case X86::ADC64ri32:
1313  case X86::ADD8rr: case X86::ADD8ri:
1314  case X86::ADD16rr: case X86::ADD16ri: case X86::ADD16ri8:
1315  case X86::ADD32rr: case X86::ADD32ri: case X86::ADD32ri8:
1316  case X86::ADD64rr: case X86::ADD64ri8: case X86::ADD64ri32:
1317  case X86::AND8rr: case X86::AND8ri:
1318  case X86::AND16rr: case X86::AND16ri: case X86::AND16ri8:
1319  case X86::AND32rr: case X86::AND32ri: case X86::AND32ri8:
1320  case X86::AND64rr: case X86::AND64ri8: case X86::AND64ri32:
1321  case X86::OR8rr: case X86::OR8ri:
1322  case X86::OR16rr: case X86::OR16ri: case X86::OR16ri8:
1323  case X86::OR32rr: case X86::OR32ri: case X86::OR32ri8:
1324  case X86::OR64rr: case X86::OR64ri8: case X86::OR64ri32:
1325  case X86::SBB8rr: case X86::SBB8ri:
1326  case X86::SBB16rr: case X86::SBB16ri: case X86::SBB16ri8:
1327  case X86::SBB32rr: case X86::SBB32ri: case X86::SBB32ri8:
1328  case X86::SBB64rr: case X86::SBB64ri8: case X86::SBB64ri32:
1329  case X86::SUB8rr: case X86::SUB8ri:
1330  case X86::SUB16rr: case X86::SUB16ri: case X86::SUB16ri8:
1331  case X86::SUB32rr: case X86::SUB32ri: case X86::SUB32ri8:
1332  case X86::SUB64rr: case X86::SUB64ri8: case X86::SUB64ri32:
1333  case X86::XOR8rr: case X86::XOR8ri:
1334  case X86::XOR16rr: case X86::XOR16ri: case X86::XOR16ri8:
1335  case X86::XOR32rr: case X86::XOR32ri: case X86::XOR32ri8:
1336  case X86::XOR64rr: case X86::XOR64ri8: case X86::XOR64ri32:
1337  // Arithmetic with just 32-bit and 64-bit variants and no immediates.
1338  case X86::ADCX32rr: case X86::ADCX64rr:
1339  case X86::ADOX32rr: case X86::ADOX64rr:
1340  case X86::ANDN32rr: case X86::ANDN64rr:
1341  // Unary arithmetic operations.
1342  case X86::DEC8r: case X86::DEC16r: case X86::DEC32r: case X86::DEC64r:
1343  case X86::INC8r: case X86::INC16r: case X86::INC32r: case X86::INC64r:
1344  case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
1345  // Check whether the EFLAGS implicit-def is dead. We assume that this will
1346  // always find the implicit-def because this code should only be reached
1347  // for instructions that do in fact implicitly def this.
1348  if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
1349  // If we would clobber EFLAGS that are used, just bail for now.
1350  LLVM_DEBUG(dbgs() << " Unable to harden post-load due to EFLAGS: ";
1351  MI.dump(); dbgs() << "\n");
1352  return false;
1353  }
1354 
1355  // Otherwise, fallthrough to handle these the same as instructions that
1356  // don't set EFLAGS.
1357  LLVM_FALLTHROUGH;
1358 
1359  // Unlike other arithmetic, NOT doesn't set EFLAGS.
1360  case X86::NOT8r: case X86::NOT16r: case X86::NOT32r: case X86::NOT64r:
1361 
1362  // Various move instructions used to zero or sign extend things. Note that we
1363  // intentionally don't support the _NOREX variants as we can't handle that
1364  // register constraint anyways.
1365  case X86::MOVSX16rr8:
1366  case X86::MOVSX32rr8: case X86::MOVSX32rr16:
1367  case X86::MOVSX64rr8: case X86::MOVSX64rr16: case X86::MOVSX64rr32:
1368  case X86::MOVZX16rr8:
1369  case X86::MOVZX32rr8: case X86::MOVZX32rr16:
1370  case X86::MOVZX64rr8: case X86::MOVZX64rr16:
1371  case X86::MOV32rr:
1372 
1373  // Arithmetic instructions that are both constant time and don't set flags.
1374  case X86::RORX32ri:
1375  case X86::RORX64ri:
1376  case X86::SARX32rr:
1377  case X86::SARX64rr:
1378  case X86::SHLX32rr:
1379  case X86::SHLX64rr:
1380  case X86::SHRX32rr:
1381  case X86::SHRX64rr:
1382 
1383  // LEA doesn't actually access memory, and its arithmetic is constant time.
1384  case X86::LEA16r:
1385  case X86::LEA32r:
1386  case X86::LEA64_32r:
1387  case X86::LEA64r:
1388  return true;
1389  }
1390 }
1391 
1392 /// Returns true if the instruction has no behavior (specified or otherwise)
1393 /// that is based on the value loaded from memory or the value of any
1394 /// non-address register operands.
1395 ///
1396 /// For example, the latency of the instruction must not depend on the
1397 /// particular bits set in any of the registers *or* any of the bits loaded from
1398 /// memory.
1399 ///
1400 /// A classical example of something that is inherently not data invariant is an
1401 /// indirect jump -- the destination is loaded into icache based on the bits set
1402 /// in the jump destination register.
1403 ///
1404 /// FIXME: This should become part of our instruction tables.
1405 static bool isDataInvariantLoad(MachineInstr &MI) {
1406  switch (MI.getOpcode()) {
1407  default:
1408  // By default, assume that the load will immediately leak.
1409  return false;
1410 
1411  // On x86 it is believed that imul is constant time w.r.t. the loaded data.
1412  // However, they set flags and are perhaps the most surprisingly constant
1413  // time operations so we call them out here separately.
1414  case X86::IMUL16rm:
1415  case X86::IMUL16rmi8:
1416  case X86::IMUL16rmi:
1417  case X86::IMUL32rm:
1418  case X86::IMUL32rmi8:
1419  case X86::IMUL32rmi:
1420  case X86::IMUL64rm:
1421  case X86::IMUL64rmi32:
1422  case X86::IMUL64rmi8:
1423 
1424  // Bit scanning and counting instructions that are somewhat surprisingly
1425  // constant time as they scan across bits and do other fairly complex
1426  // operations like popcnt, but are believed to be constant time on x86.
1427  // However, these set flags.
1428  case X86::BSF16rm:
1429  case X86::BSF32rm:
1430  case X86::BSF64rm:
1431  case X86::BSR16rm:
1432  case X86::BSR32rm:
1433  case X86::BSR64rm:
1434  case X86::LZCNT16rm:
1435  case X86::LZCNT32rm:
1436  case X86::LZCNT64rm:
1437  case X86::POPCNT16rm:
1438  case X86::POPCNT32rm:
1439  case X86::POPCNT64rm:
1440  case X86::TZCNT16rm:
1441  case X86::TZCNT32rm:
1442  case X86::TZCNT64rm:
1443 
1444  // Bit manipulation instructions are effectively combinations of basic
1445  // arithmetic ops, and should still execute in constant time. These also
1446  // set flags.
1447  case X86::BLCFILL32rm:
1448  case X86::BLCFILL64rm:
1449  case X86::BLCI32rm:
1450  case X86::BLCI64rm:
1451  case X86::BLCIC32rm:
1452  case X86::BLCIC64rm:
1453  case X86::BLCMSK32rm:
1454  case X86::BLCMSK64rm:
1455  case X86::BLCS32rm:
1456  case X86::BLCS64rm:
1457  case X86::BLSFILL32rm:
1458  case X86::BLSFILL64rm:
1459  case X86::BLSI32rm:
1460  case X86::BLSI64rm:
1461  case X86::BLSIC32rm:
1462  case X86::BLSIC64rm:
1463  case X86::BLSMSK32rm:
1464  case X86::BLSMSK64rm:
1465  case X86::BLSR32rm:
1466  case X86::BLSR64rm:
1467  case X86::TZMSK32rm:
1468  case X86::TZMSK64rm:
1469 
1470  // Bit extracting and clearing instructions should execute in constant time,
1471  // and set flags.
1472  case X86::BEXTR32rm:
1473  case X86::BEXTR64rm:
1474  case X86::BEXTRI32mi:
1475  case X86::BEXTRI64mi:
1476  case X86::BZHI32rm:
1477  case X86::BZHI64rm:
1478 
1479  // Basic arithmetic is constant time on the input but does set flags.
1480  case X86::ADC8rm:
1481  case X86::ADC16rm:
1482  case X86::ADC32rm:
1483  case X86::ADC64rm:
1484  case X86::ADCX32rm:
1485  case X86::ADCX64rm:
1486  case X86::ADD8rm:
1487  case X86::ADD16rm:
1488  case X86::ADD32rm:
1489  case X86::ADD64rm:
1490  case X86::ADOX32rm:
1491  case X86::ADOX64rm:
1492  case X86::AND8rm:
1493  case X86::AND16rm:
1494  case X86::AND32rm:
1495  case X86::AND64rm:
1496  case X86::ANDN32rm:
1497  case X86::ANDN64rm:
1498  case X86::OR8rm:
1499  case X86::OR16rm:
1500  case X86::OR32rm:
1501  case X86::OR64rm:
1502  case X86::SBB8rm:
1503  case X86::SBB16rm:
1504  case X86::SBB32rm:
1505  case X86::SBB64rm:
1506  case X86::SUB8rm:
1507  case X86::SUB16rm:
1508  case X86::SUB32rm:
1509  case X86::SUB64rm:
1510  case X86::XOR8rm:
1511  case X86::XOR16rm:
1512  case X86::XOR32rm:
1513  case X86::XOR64rm:
1514  // Check whether the EFLAGS implicit-def is dead. We assume that this will
1515  // always find the implicit-def because this code should only be reached
1516  // for instructions that do in fact implicitly def this.
1517  if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
1518  // If we would clobber EFLAGS that are used, just bail for now.
1519  LLVM_DEBUG(dbgs() << " Unable to harden post-load due to EFLAGS: ";
1520  MI.dump(); dbgs() << "\n");
1521  return false;
1522  }
1523 
1524  // Otherwise, fallthrough to handle these the same as instructions that
1525  // don't set EFLAGS.
1526  LLVM_FALLTHROUGH;
1527 
1528  // Integer multiply w/o affecting flags is still believed to be constant
1529  // time on x86. Called out separately as this is among the most surprising
1530  // instructions to exhibit that behavior.
1531  case X86::MULX32rm:
1532  case X86::MULX64rm:
1533 
1534  // Arithmetic instructions that are both constant time and don't set flags.
1535  case X86::RORX32mi:
1536  case X86::RORX64mi:
1537  case X86::SARX32rm:
1538  case X86::SARX64rm:
1539  case X86::SHLX32rm:
1540  case X86::SHLX64rm:
1541  case X86::SHRX32rm:
1542  case X86::SHRX64rm:
1543 
1544  // Conversions are believed to be constant time and don't set flags.
1545  case X86::CVTTSD2SI64rm: case X86::VCVTTSD2SI64rm: case X86::VCVTTSD2SI64Zrm:
1546  case X86::CVTTSD2SIrm: case X86::VCVTTSD2SIrm: case X86::VCVTTSD2SIZrm:
1547  case X86::CVTTSS2SI64rm: case X86::VCVTTSS2SI64rm: case X86::VCVTTSS2SI64Zrm:
1548  case X86::CVTTSS2SIrm: case X86::VCVTTSS2SIrm: case X86::VCVTTSS2SIZrm:
1549  case X86::CVTSI2SDrm: case X86::VCVTSI2SDrm: case X86::VCVTSI2SDZrm:
1550  case X86::CVTSI2SSrm: case X86::VCVTSI2SSrm: case X86::VCVTSI2SSZrm:
1551  case X86::CVTSI642SDrm: case X86::VCVTSI642SDrm: case X86::VCVTSI642SDZrm:
1552  case X86::CVTSI642SSrm: case X86::VCVTSI642SSrm: case X86::VCVTSI642SSZrm:
1553  case X86::CVTSS2SDrm: case X86::VCVTSS2SDrm: case X86::VCVTSS2SDZrm:
1554  case X86::CVTSD2SSrm: case X86::VCVTSD2SSrm: case X86::VCVTSD2SSZrm:
1555  // AVX512 added unsigned integer conversions.
1556  case X86::VCVTTSD2USI64Zrm:
1557  case X86::VCVTTSD2USIZrm:
1558  case X86::VCVTTSS2USI64Zrm:
1559  case X86::VCVTTSS2USIZrm:
1560  case X86::VCVTUSI2SDZrm:
1561  case X86::VCVTUSI642SDZrm:
1562  case X86::VCVTUSI2SSZrm:
1563  case X86::VCVTUSI642SSZrm:
1564 
1565  // Loads to register don't set flags.
1566  case X86::MOV8rm:
1567  case X86::MOV8rm_NOREX:
1568  case X86::MOV16rm:
1569  case X86::MOV32rm:
1570  case X86::MOV64rm:
1571  case X86::MOVSX16rm8:
1572  case X86::MOVSX32rm16:
1573  case X86::MOVSX32rm8:
1574  case X86::MOVSX32rm8_NOREX:
1575  case X86::MOVSX64rm16:
1576  case X86::MOVSX64rm32:
1577  case X86::MOVSX64rm8:
1578  case X86::MOVZX16rm8:
1579  case X86::MOVZX32rm16:
1580  case X86::MOVZX32rm8:
1581  case X86::MOVZX32rm8_NOREX:
1582  case X86::MOVZX64rm16:
1583  case X86::MOVZX64rm8:
1584  return true;
1585  }
1586 }
1587 
1588 static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1589  const TargetRegisterInfo &TRI) {
1590  // Check if EFLAGS are alive by seeing if there is a def of them or they
1591  // live-in, and then seeing if that def is in turn used.
1592  for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
1593  if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
1594  // If the def is dead, then EFLAGS is not live.
1595  if (DefOp->isDead())
1596  return false;
1597 
1598  // Otherwise we've def'ed it, and it is live.
1599  return true;
1600  }
1601  // While at this instruction, also check if we use and kill EFLAGS
1602  // which means it isn't live.
1603  if (MI.killsRegister(X86::EFLAGS, &TRI))
1604  return false;
1605  }
1606 
1607  // If we didn't find anything conclusive (neither definitely alive nor
1608  // definitely dead), return whether it lives into the block.
1609  return MBB.isLiveIn(X86::EFLAGS);
1610 }
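// Illustrative sketch (not code from this pass; MIR-style, registers made up):
//
//   %a = ADD64rr %b, %c, implicit-def dead $eflags   ; dead def of EFLAGS
//   TEST64rr %a, %a, implicit-def $eflags            ; live def of EFLAGS
//   JCC_1 %bb.1, 4, implicit killed $eflags          ; use that kills EFLAGS
//
// Scanning backwards from a point after the JCC_1 hits the kill first and
// reports EFLAGS dead; scanning from between TEST and JCC_1 hits the live def
// and reports it alive; scanning from between ADD and TEST hits the dead def
// and reports it dead. Only when no def or kill is found does the block's
// live-in set decide.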
1611 
1612 /// Trace the predicate state through each of the blocks in the function,
1613 /// hardening everything necessary along the way.
1614 ///
1615 /// We call this routine once the initial predicate state has been established
1616 /// for each basic block in the function in the SSA updater. This routine traces
1617 /// it through the instructions within each basic block, and for non-returning
1618 /// blocks informs the SSA updater about the final state that lives out of the
1619 /// block. Along the way, it hardens any vulnerable instruction using the
1620 /// currently valid predicate state. We have to do these two things together
1621 /// because the SSA updater only works across blocks. Within a block, we track
1622 /// the current predicate state directly and update it as it changes.
1623 ///
1624 /// This operates in two passes over each block. First, we analyze the loads in
1625 /// the block to determine which strategy will be used to harden them: hardening
1626 /// the address or hardening the loaded value when loaded into a register
1627 /// amenable to hardening. We have to process these first because the two
1628 /// strategies may interact -- later hardening may change what strategy we wish
1629 /// to use. We also will analyze data dependencies between loads and avoid
1630 /// hardening those loads that are data dependent on a load with a hardened
1631 /// address. We also skip hardening loads already behind an LFENCE as that is
1632 /// sufficient to harden them against misspeculation.
1633 ///
1634 /// Second, we actively trace the predicate state through the block, applying
1635 /// the hardening steps we determined necessary in the first pass as we go.
1636 ///
1637 /// These two passes are applied to each basic block. We operate one block at a
1638 /// time to simplify reasoning about reachability and sequencing.
1639 void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
1640  MachineFunction &MF) {
1641  SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
1642  SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
1643 
1644  SmallSet<unsigned, 16> HardenedAddrRegs;
1645 
1646  SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;
1647 
1648  // Track the set of load-dependent registers through the basic block. Because
1649  // the values of these registers have an existing data dependency on a loaded
1650  // value which we would have checked, we can omit any checks on them.
1651  SparseBitVector<> LoadDepRegs;
1652 
1653  for (MachineBasicBlock &MBB : MF) {
1654  // The first pass over the block: collect all the loads which can have their
1655  // loaded value hardened and all the loads that instead need their address
1656  // hardened. During this walk we propagate load dependence for address
1657  // hardened loads and also look for LFENCE to stop hardening wherever
1658  // possible. When deciding whether to harden the loaded value, we check
1659  // whether any registers used in the address will already have been hardened
1660  // at this point and if so, harden any remaining address registers
1661  // as that often successfully re-uses hardened addresses and minimizes
1662  // instructions.
1663  //
1664  // FIXME: We should consider an aggressive mode where we keep as many loads
1665  // value-hardened as possible even when some address register hardening would
1666  // be free (due to reuse).
1667  //
1668  // Note that we only need this pass if we are actually hardening loads.
1669  if (HardenLoads)
1670  for (MachineInstr &MI : MBB) {
1671  // We naively assume that all def'ed registers of an instruction have
1672  // a data dependency on all of their operands.
1673  // FIXME: Do a more careful analysis of x86 to build a conservative
1674  // model here.
1675  if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
1676  return Op.isReg() && LoadDepRegs.test(Op.getReg());
1677  }))
1678  for (MachineOperand &Def : MI.defs())
1679  if (Def.isReg())
1680  LoadDepRegs.set(Def.getReg());
1681 
1682  // Both Intel and AMD have indicated that they will change the semantics of
1683  // LFENCE to be a speculation barrier, so if we see an LFENCE, there is
1684  // no more need to guard things in this block.
1685  if (MI.getOpcode() == X86::LFENCE)
1686  break;
1687 
1688  // If this instruction cannot load, nothing to do.
1689  if (!MI.mayLoad())
1690  continue;
1691 
1692  // Some instructions which "load" are trivially safe or unimportant.
1693  if (MI.getOpcode() == X86::MFENCE)
1694  continue;
1695 
1696  // Extract the memory operand information about this instruction.
1697  // FIXME: This doesn't handle loading pseudo instructions which we often
1698  // could handle with similarly generic logic. We probably need to add an
1699  // MI-layer routine similar to the MC-layer one we use here which maps
1700  // pseudos much like this maps real instructions.
1701  const MCInstrDesc &Desc = MI.getDesc();
1702  int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
1703  if (MemRefBeginIdx < 0) {
1704  LLVM_DEBUG(dbgs()
1705  << "WARNING: unable to harden loading instruction: ";
1706  MI.dump());
1707  continue;
1708  }
1709 
1710  MemRefBeginIdx += X86II::getOperandBias(Desc);
1711 
1712  MachineOperand &BaseMO =
1713  MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1714  MachineOperand &IndexMO =
1715  MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1716 
1717  // If we have at least one (non-frame-index, non-RIP) register operand,
1718  // and neither operand is load-dependent, we need to check the load.
1719  unsigned BaseReg = 0, IndexReg = 0;
1720  if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
1721  BaseMO.getReg() != X86::NoRegister)
1722  BaseReg = BaseMO.getReg();
1723  if (IndexMO.getReg() != X86::NoRegister)
1724  IndexReg = IndexMO.getReg();
1725 
1726  if (!BaseReg && !IndexReg)
1727  // No register operands!
1728  continue;
1729 
1730  // If any register operand is dependent, this load is dependent and we
1731  // needn't check it.
1732  // FIXME: Is this true in the case where we are hardening loads after
1733  // they complete? Unclear, need to investigate.
1734  if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
1735  (IndexReg && LoadDepRegs.test(IndexReg)))
1736  continue;
1737 
1738  // If post-load hardening is enabled, this load is compatible with
1739  // post-load hardening, and we aren't already going to harden one of the
1740  // address registers, queue it up to be hardened post-load. Notably,
1741  // even once hardened this won't introduce a useful dependency that
1742  // could prune out subsequent loads.
1743  if (EnablePostLoadHardening && isDataInvariantLoad(MI) &&
1744  MI.getDesc().getNumDefs() == 1 && MI.getOperand(0).isReg() &&
1745  canHardenRegister(MI.getOperand(0).getReg()) &&
1746  !HardenedAddrRegs.count(BaseReg) &&
1747  !HardenedAddrRegs.count(IndexReg)) {
1748  HardenPostLoad.insert(&MI);
1749  HardenedAddrRegs.insert(MI.getOperand(0).getReg());
1750  continue;
1751  }
1752 
1753  // Record this instruction for address hardening and record its register
1754  // operands as being address-hardened.
1755  HardenLoadAddr.insert(&MI);
1756  if (BaseReg)
1757  HardenedAddrRegs.insert(BaseReg);
1758  if (IndexReg)
1759  HardenedAddrRegs.insert(IndexReg);
1760 
1761  for (MachineOperand &Def : MI.defs())
1762  if (Def.isReg())
1763  LoadDepRegs.set(Def.getReg());
1764  }
1765 
1766  // Now re-walk the instructions in the basic block, and apply whichever
1767  // hardening strategy we have elected. Note that we do this in a second
1768  // pass specifically so that we have the complete set of instructions for
1769  // which we will do post-load hardening and can defer it in certain
1770  // circumstances.
1771  for (MachineInstr &MI : MBB) {
1772  if (HardenLoads) {
1773  // We cannot both require hardening the def of a load and its address.
1774  assert(!(HardenLoadAddr.count(&MI) && HardenPostLoad.count(&MI)) &&
1775  "Requested to harden both the address and def of a load!");
1776 
1777  // Check if this is a load whose address needs to be hardened.
1778  if (HardenLoadAddr.erase(&MI)) {
1779  const MCInstrDesc &Desc = MI.getDesc();
1780  int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
1781  assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");
1782 
1783  MemRefBeginIdx += X86II::getOperandBias(Desc);
1784 
1785  MachineOperand &BaseMO =
1786  MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1787  MachineOperand &IndexMO =
1788  MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1789  hardenLoadAddr(MI, BaseMO, IndexMO, AddrRegToHardenedReg);
1790  continue;
1791  }
1792 
1793  // Test if this instruction is one of our post load instructions (and
1794  // remove it from the set if so).
1795  if (HardenPostLoad.erase(&MI)) {
1796  assert(!MI.isCall() && "Must not try to post-load harden a call!");
1797 
1798  // If this is a data-invariant load, we want to try and sink any
1799  // hardening as far as possible.
1800  if (isDataInvariantLoad(MI)) {
1801  // Sink the instruction we'll need to harden as far as we can down
1802  // the graph.
1803  MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);
1804 
1805  // If we managed to sink this instruction, update everything so we
1806  // harden that instruction when we reach it in the instruction
1807  // sequence.
1808  if (SunkMI != &MI) {
1809  // If in sinking there was no instruction needing to be hardened,
1810  // we're done.
1811  if (!SunkMI)
1812  continue;
1813 
1814  // Otherwise, add this to the set of defs we harden.
1815  HardenPostLoad.insert(SunkMI);
1816  continue;
1817  }
1818  }
1819 
1820  unsigned HardenedReg = hardenPostLoad(MI);
1821 
1822  // Mark the resulting hardened register as such so we don't re-harden.
1823  AddrRegToHardenedReg[HardenedReg] = HardenedReg;
1824 
1825  continue;
1826  }
1827 
1828  // Check for an indirect call or branch that may need its input hardened
1829  // even if we couldn't find the specific load used, or were able to
1830  // avoid hardening it for some reason. Note that here we cannot break
1831  // out afterward as we may still need to handle any call aspect of this
1832  // instruction.
1833  if ((MI.isCall() || MI.isBranch()) && HardenIndirectCallsAndJumps)
1834  hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
1835  }
1836 
1837  // After we finish hardening loads we handle interprocedural hardening if
1838  // enabled and relevant for this instruction.
1839  if (!HardenInterprocedurally)
1840  continue;
1841  if (!MI.isCall() && !MI.isReturn())
1842  continue;
1843 
1844  // If this is a direct return (i.e., not a tail call) just directly harden
1845  // it.
1846  if (MI.isReturn() && !MI.isCall()) {
1847  hardenReturnInstr(MI);
1848  continue;
1849  }
1850 
1851  // Otherwise we have a call. We need to handle transferring the predicate
1852  // state into a call and recovering it after the call returns (unless this
1853  // is a tail call).
1854  assert(MI.isCall() && "Should only reach here for calls!");
1855  tracePredStateThroughCall(MI);
1856  }
1857 
1858  HardenPostLoad.clear();
1859  HardenLoadAddr.clear();
1860  HardenedAddrRegs.clear();
1861  AddrRegToHardenedReg.clear();
1862 
1863  // Currently, we only track data-dependent loads within a basic block.
1864  // FIXME: We should see if this is necessary or if we could be more
1865  // aggressive here without opening up attack avenues.
1866  LoadDepRegs.clear();
1867  }
1868 }
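// A simplified illustration of the decisions made by the first pass (operands
// abbreviated, names illustrative):
//
//   %val = MOV64rm %base, ...     ; data-invariant load into a GPR: queued in
//                                 ;   HardenPostLoad (its value is OR-ed with
//                                 ;   the state after the load)
//   %vec = MOVAPSrm %ptr, ...     ; loads into vector registers cannot be
//                                 ;   post-load hardened, so the address regs
//                                 ;   go into HardenLoadAddr and %vec becomes
//                                 ;   load-dependent via LoadDepRegs
//
// The second pass then emits the corresponding hardening at each recorded
// instruction.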
1869 
1870 /// Save EFLAGS into the returned GPR. This can in turn be restored with
1871 /// `restoreEFLAGS`.
1872 ///
1873 /// Note that LLVM can only lower very simple patterns of saved and restored
1874 /// EFLAGS registers. The restore should always be within the same basic block
1875 /// as the save so that no PHI nodes are inserted.
1876 unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
1877  MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1878  DebugLoc Loc) {
1879  // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
1880  // what instruction selection does.
1881  Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
1882  // We directly copy the FLAGS register and rely on later lowering to clean
1883  // this up into the appropriate setCC instructions.
1884  BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
1885  ++NumInstsInserted;
1886  return Reg;
1887 }
1888 
1889 /// Restore EFLAGS from the provided GPR. This should be produced by
1890 /// `saveEFLAGS`.
1891 ///
1892 /// This must be done within the same basic block as the save in order to
1893 /// reliably lower.
1894 void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
1895  MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
1896  unsigned Reg) {
1897  BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
1898  ++NumInstsInserted;
1899 }
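// Typical usage inside this pass (a sketch, not a fixed API contract): when a
// flag-clobbering hardening instruction must be inserted while EFLAGS is live,
//
//   unsigned FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
//   // ... insert the OR/SHL/etc. that clobber EFLAGS ...
//   restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
//
// with both calls in the same basic block so the flag copies lower cleanly.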
1900 
1901 /// Takes the current predicate state (in a register) and merges it into the
1902 /// stack pointer. The state is essentially a single bit, but we merge this in
1903 /// a way that won't form non-canonical pointers and also will be preserved
1904 /// across normal stack adjustments.
1905 void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
1906  MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
1907  unsigned PredStateReg) {
1908  Register TmpReg = MRI->createVirtualRegister(PS->RC);
1909  // FIXME: This hard codes a shift distance based on the number of bits needed
1910  // to stay canonical on 64-bit. We should compute this somehow and support
1911  // 32-bit as part of that.
1912  auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
1913  .addReg(PredStateReg, RegState::Kill)
1914  .addImm(47);
1915  ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1916  ++NumInstsInserted;
1917  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
1918  .addReg(X86::RSP)
1919  .addReg(TmpReg, RegState::Kill);
1920  OrI->addRegisterDead(X86::EFLAGS, TRI);
1921  ++NumInstsInserted;
1922 }
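// The emitted sequence is roughly (the real code shifts into a fresh virtual
// register rather than in place):
//
//   shlq $47, %state      # move the all-zeros/all-ones state into the high bits
//   orq  %state, %rsp     # state == 0 leaves RSP untouched; state == -1 forces
//                         # the high bits of RSP to one, which stays canonical
//                         # but is recoverable (and poisonous) across the edge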
1923 
1924 /// Extracts the predicate state stored in the high bits of the stack pointer.
1925 unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
1926  MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1927  DebugLoc Loc) {
1928  Register PredStateReg = MRI->createVirtualRegister(PS->RC);
1929  Register TmpReg = MRI->createVirtualRegister(PS->RC);
1930 
1931  // We know that the stack pointer will have any preserved predicate state in
1932  // its high bit. We just want to smear this across the other bits. Turns out,
1933  // this is exactly what an arithmetic right shift does.
1934  BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
1935  .addReg(X86::RSP);
1936  auto ShiftI =
1937  BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
1938  .addReg(TmpReg, RegState::Kill)
1939  .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
1940  ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1941  ++NumInstsInserted;
1942 
1943  return PredStateReg;
1944 }
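// Roughly the inverse of mergePredStateIntoSP:
//
//   movq %rsp, %state
//   sarq $63, %state      # smear the top bit: 0 -> all zeros, 1 -> all ones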
1945 
1946 void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
1947  MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
1948  SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
1949  MachineBasicBlock &MBB = *MI.getParent();
1950  DebugLoc Loc = MI.getDebugLoc();
1951 
1952  // Check if EFLAGS are alive by seeing if there is a def of them or they
1953  // live-in, and then seeing if that def is in turn used.
1954  bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);
1955 
1956  SmallVector<MachineOperand *, 2> HardenOpRegs;
1957 
1958  if (BaseMO.isFI()) {
1959  // A frame index is never a dynamically controllable load, so only
1960  // harden it if we're covering fixed address loads as well.
1961  LLVM_DEBUG(
1962  dbgs() << " Skipping hardening base of explicit stack frame load: ";
1963  MI.dump(); dbgs() << "\n");
1964  } else if (BaseMO.getReg() == X86::RSP) {
1965  // Some idempotent atomic operations are lowered directly to a locked
1966  // OR with 0 to the top of the stack (or slightly offset from the top),
1967  // which uses an explicit RSP register as the base.
1968  assert(IndexMO.getReg() == X86::NoRegister &&
1969  "Explicit RSP access with dynamic index!");
1970  LLVM_DEBUG(
1971  dbgs() << " Cannot harden base of explicit RSP offset in a load!");
1972  } else if (BaseMO.getReg() == X86::RIP ||
1973  BaseMO.getReg() == X86::NoRegister) {
1974  // For both RIP-relative addressed loads or absolute loads, we cannot
1975  // meaningfully harden them because the address being loaded has no
1976  // dynamic component.
1977  //
1978  // FIXME: When using a segment base (like TLS does) we end up with the
1979  // dynamic address being the base plus -1 because we can't mutate the
1980  // segment register here. This allows the signed 32-bit offset to point at
1981  // valid segment-relative addresses and load them successfully.
1982  LLVM_DEBUG(
1983  dbgs() << " Cannot harden base of "
1984  << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
1985  << " address in a load!");
1986  } else {
1987  assert(BaseMO.isReg() &&
1988  "Only allowed to have a frame index or register base.");
1989  HardenOpRegs.push_back(&BaseMO);
1990  }
1991 
1992  if (IndexMO.getReg() != X86::NoRegister &&
1993  (HardenOpRegs.empty() ||
1994  HardenOpRegs.front()->getReg() != IndexMO.getReg()))
1995  HardenOpRegs.push_back(&IndexMO);
1996 
1997  assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
1998  "Should have exactly one or two registers to harden!");
1999  assert((HardenOpRegs.size() == 1 ||
2000  HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
2001  "Should not have two of the same registers!");
2002 
2003  // Remove any registers that have already been checked.
2004  llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
2005  // See if this operand's register has already been checked.
2006  auto It = AddrRegToHardenedReg.find(Op->getReg());
2007  if (It == AddrRegToHardenedReg.end())
2008  // Not checked, so retain this one.
2009  return false;
2010 
2011  // Otherwise, we can directly update this operand and remove it.
2012  Op->setReg(It->second);
2013  return true;
2014  });
2015  // If there are none left, we're done.
2016  if (HardenOpRegs.empty())
2017  return;
2018 
2019  // Compute the current predicate state.
2020  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2021 
2022  auto InsertPt = MI.getIterator();
2023 
2024  // If EFLAGS are live and we don't have access to instructions that avoid
2025  // clobbering EFLAGS we need to save and restore them. This in turn makes
2026  // the EFLAGS no longer live.
2027  unsigned FlagsReg = 0;
2028  if (EFLAGSLive && !Subtarget->hasBMI2()) {
2029  EFLAGSLive = false;
2030  FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
2031  }
2032 
2033  for (MachineOperand *Op : HardenOpRegs) {
2034  Register OpReg = Op->getReg();
2035  auto *OpRC = MRI->getRegClass(OpReg);
2036  Register TmpReg = MRI->createVirtualRegister(OpRC);
2037 
2038  // If this is a vector register, we'll need somewhat custom logic to handle
2039  // hardening it.
2040  if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
2041  OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
2042  assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
2043  bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
2044 
2045  // Move our state into a vector register.
2046  // FIXME: We could skip this at the cost of longer encodings with AVX-512
2047  // but that doesn't seem likely worth it.
2048  Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
2049  auto MovI =
2050  BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
2051  .addReg(StateReg);
2052  (void)MovI;
2053  ++NumInstsInserted;
2054  LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");
2055 
2056  // Broadcast it across the vector register.
2057  Register VBStateReg = MRI->createVirtualRegister(OpRC);
2058  auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
2059  TII->get(Is128Bit ? X86::VPBROADCASTQrr
2060  : X86::VPBROADCASTQYrr),
2061  VBStateReg)
2062  .addReg(VStateReg);
2063  (void)BroadcastI;
2064  ++NumInstsInserted;
2065  LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
2066  dbgs() << "\n");
2067 
2068  // Merge our potential poison state into the value with a vector or.
2069  auto OrI =
2070  BuildMI(MBB, InsertPt, Loc,
2071  TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
2072  .addReg(VBStateReg)
2073  .addReg(OpReg);
2074  (void)OrI;
2075  ++NumInstsInserted;
2076  LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
2077  } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
2078  OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
2079  OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
2080  assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
2081  bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
2082  bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
2083  if (Is128Bit || Is256Bit)
2084  assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
2085 
2086  // Broadcast our state into a vector register.
2087  Register VStateReg = MRI->createVirtualRegister(OpRC);
2088  unsigned BroadcastOp =
2089  Is128Bit ? X86::VPBROADCASTQrZ128r
2090  : Is256Bit ? X86::VPBROADCASTQrZ256r : X86::VPBROADCASTQrZr;
2091  auto BroadcastI =
2092  BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
2093  .addReg(StateReg);
2094  (void)BroadcastI;
2095  ++NumInstsInserted;
2096  LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
2097  dbgs() << "\n");
2098 
2099  // Merge our potential poison state into the value with a vector or.
2100  unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
2101  : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
2102  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
2103  .addReg(VStateReg)
2104  .addReg(OpReg);
2105  (void)OrI;
2106  ++NumInstsInserted;
2107  LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
2108  } else {
2109  // FIXME: Need to support GR32 here for 32-bit code.
2110  assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
2111  "Not a supported register class for address hardening!");
2112 
2113  if (!EFLAGSLive) {
2114  // Merge our potential poison state into the value with an or.
2115  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
2116  .addReg(StateReg)
2117  .addReg(OpReg);
2118  OrI->addRegisterDead(X86::EFLAGS, TRI);
2119  ++NumInstsInserted;
2120  LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
2121  } else {
2122  // We need to avoid touching EFLAGS, so shift the value right by the state
2123  // (wiping out all but its top bit during misspeculation) without updating flags.
2124  auto ShiftI =
2125  BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
2126  .addReg(OpReg)
2127  .addReg(StateReg);
2128  (void)ShiftI;
2129  ++NumInstsInserted;
2130  LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
2131  dbgs() << "\n");
2132  }
2133  }
2134 
2135  // Record this register as checked and update the operand.
2136  assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
2137  "Should not have checked this register yet!");
2138  AddrRegToHardenedReg[Op->getReg()] = TmpReg;
2139  Op->setReg(TmpReg);
2140  ++NumAddrRegsHardened;
2141  }
2142 
2143  // And restore the flags if needed.
2144  if (FlagsReg)
2145  restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
2146 }
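// For a GR64 address register, the two scalar forms emitted above are roughly:
//
//   orq   %state, %reg           # EFLAGS dead: all-ones state poisons the address
//   shrxq %state, %reg, %tmp     # EFLAGS live (BMI2): state == 0 is a no-op,
//                                # state == -1 shifts out all but the top bit
//
// Vector address registers are handled by broadcasting the state and applying
// a vector OR instead.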
2147 
2148 MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
2149  MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
2150  assert(isDataInvariantLoad(InitialMI) &&
2151  "Cannot get here with a non-invariant load!");
2152 
2153  // See if we can sink hardening the loaded value.
2154  auto SinkCheckToSingleUse =
2155  [&](MachineInstr &MI) -> Optional<MachineInstr *> {
2156  Register DefReg = MI.getOperand(0).getReg();
2157 
2158  // We need to find a single use to which we can sink the check. We can
2159  // primarily do this because many uses may already end up checked on their
2160  // own.
2161  MachineInstr *SingleUseMI = nullptr;
2162  for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
2163  // If we're already going to harden this use, it is data invariant and
2164  // within our block.
2165  if (HardenedInstrs.count(&UseMI)) {
2166  if (!isDataInvariantLoad(UseMI)) {
2167  // If we've already decided to harden a non-load, we must have sunk
2168  // some other post-load hardened instruction to it and it must itself
2169  // be data-invariant.
2171  "Data variant instruction being hardened!");
2172  continue;
2173  }
2174 
2175  // Otherwise, this is a load and the load component can't be data
2176  // invariant so check how this register is being used.
2177  const MCInstrDesc &Desc = UseMI.getDesc();
2178  int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
2179  assert(MemRefBeginIdx >= 0 &&
2180  "Should always have mem references here!");
2181  MemRefBeginIdx += X86II::getOperandBias(Desc);
2182 
2183  MachineOperand &BaseMO =
2184  UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
2185  MachineOperand &IndexMO =
2186  UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
2187  if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
2188  (IndexMO.isReg() && IndexMO.getReg() == DefReg))
2189  // The load uses the register as part of its address making it not
2190  // invariant.
2191  return {};
2192 
2193  continue;
2194  }
2195 
2196  if (SingleUseMI)
2197  // We already have a single use, this would make two. Bail.
2198  return {};
2199 
2200  // If this single use isn't data invariant, isn't in this block, or has
2201  // interfering EFLAGS, we can't sink the hardening to it.
2202  if (!isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent())
2203  return {};
2204 
2205  // If this instruction defines multiple registers bail as we won't harden
2206  // all of them.
2207  if (UseMI.getDesc().getNumDefs() > 1)
2208  return {};
2209 
2210  // If this register isn't a virtual register we can't sanely walk its uses,
2211  // so just bail. Also check that its register class is one of the ones we
2212  // can harden.
2213  Register UseDefReg = UseMI.getOperand(0).getReg();
2214  if (!Register::isVirtualRegister(UseDefReg) ||
2215  !canHardenRegister(UseDefReg))
2216  return {};
2217 
2218  SingleUseMI = &UseMI;
2219  }
2220 
2221  // If SingleUseMI is still null, there is no use that needs its own
2222  // checking. Otherwise, it is the single use that needs checking.
2223  return {SingleUseMI};
2224  };
2225 
2226  MachineInstr *MI = &InitialMI;
2227  while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
2228  // Update which MI we're checking now.
2229  MI = *SingleUse;
2230  if (!MI)
2231  break;
2232  }
2233 
2234  return MI;
2235 }
2236 
2237 bool X86SpeculativeLoadHardeningPass::canHardenRegister(unsigned Reg) {
2238  auto *RC = MRI->getRegClass(Reg);
2239  int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
2240  if (RegBytes > 8)
2241  // We don't support post-load hardening of vectors.
2242  return false;
2243 
2244  unsigned RegIdx = Log2_32(RegBytes);
2245  assert(RegIdx < 4 && "Unsupported register size");
2246 
2247  // If this register class is explicitly constrained to a class that doesn't
2248  // require REX prefix, we may not be able to satisfy that constraint when
2249  // emitting the hardening instructions, so bail out here.
2250  // FIXME: This seems like a pretty lame hack. The way this comes up is when we
2251  // end up both with a NOREX and REX-only register as operands to the hardening
2252  // instructions. It would be better to fix that code to handle this situation
2253  // rather than hack around it in this way.
2254  const TargetRegisterClass *NOREXRegClasses[] = {
2255  &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
2256  &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
2257  if (RC == NOREXRegClasses[RegIdx])
2258  return false;
2259 
2260  const TargetRegisterClass *GPRRegClasses[] = {
2261  &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
2262  &X86::GR64RegClass};
2263  return RC->hasSuperClassEq(GPRRegClasses[RegIdx]);
2264 }
2265 
2266 /// Harden a value in a register.
2267 ///
2268 /// This is the low-level logic to fully harden a value sitting in a register
2269 /// against leaking during speculative execution.
2270 ///
2271 /// Unlike hardening an address that is used by a load, this routine is required
2272 /// to hide *all* incoming bits in the register.
2273 ///
2274 /// `Reg` must be a virtual register. Currently, it is required to be a GPR no
2275 /// larger than the predicate state register. FIXME: We should support vector
2276 /// registers here by broadcasting the predicate state.
2277 ///
2278 /// The new, hardened virtual register is returned. It will have the same
2279 /// register class as `Reg`.
2280 unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
2281  unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
2282  DebugLoc Loc) {
2283  assert(canHardenRegister(Reg) && "Cannot harden this register!");
2284  assert(Register::isVirtualRegister(Reg) && "Cannot harden a physical register!");
2285 
2286  auto *RC = MRI->getRegClass(Reg);
2287  int Bytes = TRI->getRegSizeInBits(*RC) / 8;
2288 
2289  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2290 
2291  // FIXME: Need to teach this about 32-bit mode.
2292  if (Bytes != 8) {
2293  unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
2294  unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
2295  Register NarrowStateReg = MRI->createVirtualRegister(RC);
2296  BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
2297  .addReg(StateReg, 0, SubRegImm);
2298  StateReg = NarrowStateReg;
2299  }
2300 
2301  unsigned FlagsReg = 0;
2302  if (isEFLAGSLive(MBB, InsertPt, *TRI))
2303  FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
2304 
2305  Register NewReg = MRI->createVirtualRegister(RC);
2306  unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
2307  unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
2308  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
2309  .addReg(StateReg)
2310  .addReg(Reg);
2311  OrI->addRegisterDead(X86::EFLAGS, TRI);
2312  ++NumInstsInserted;
2313  LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
2314 
2315  if (FlagsReg)
2316  restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
2317 
2318  return NewReg;
2319 }
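// For example (sketch), hardening a 32-bit value against the 64-bit predicate
// state looks roughly like:
//
//   %state32  = COPY %state64.sub_32bit
//   %hardened = OR32rr %state32, %value, implicit-def dead $eflags
//
// with EFLAGS saved and restored around the OR if it is live at this point.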
2320 
2321 /// Harden a load by hardening the loaded value in the defined register.
2322 ///
2323 /// We can harden a non-leaking load into a register without touching the
2324 /// address by just hiding all of the loaded bits during misspeculation. We use
2325 /// an `or` instruction to do this because we set up our poison value as all
2326 /// ones. And the goal is just for the loaded bits to not be exposed to
2327 /// execution and coercing them to one is sufficient.
2328 ///
2329 /// Returns the newly hardened register.
2330 unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
2331  MachineBasicBlock &MBB = *MI.getParent();
2332  DebugLoc Loc = MI.getDebugLoc();
2333 
2334  auto &DefOp = MI.getOperand(0);
2335  Register OldDefReg = DefOp.getReg();
2336  auto *DefRC = MRI->getRegClass(OldDefReg);
2337 
2338  // Because we want to completely replace the uses of this def'ed value with
2339  // the hardened value, create a dedicated new register that will only be used
2340  // to communicate the unhardened value to the hardening.
2341  Register UnhardenedReg = MRI->createVirtualRegister(DefRC);
2342  DefOp.setReg(UnhardenedReg);
2343 
2344  // Now harden this register's value, getting a hardened reg that is safe to
2345  // use. Note that we insert the instructions to compute this *after* the
2346  // defining instruction, not before it.
2347  unsigned HardenedReg = hardenValueInRegister(
2348  UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);
2349 
2350  // Finally, replace the old register (which now only has the uses of the
2351  // original def) with the hardened register.
2352  MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);
2353 
2354  ++NumPostLoadRegsHardened;
2355  return HardenedReg;
2356 }
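// Sketch of the rewrite performed here: given
//
//   %old = MOV64rm ...                 ; def with existing uses
//
// the def is renamed and every former use is redirected to the hardened copy:
//
//   %unhardened = MOV64rm ...
//   %hardened   = OR64rr %state, %unhardened
//   ...                                ; uses of %old now read %hardened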
2357 
2358 /// Harden a return instruction.
2359 ///
2360 /// Return instructions implicitly perform a load which we need to harden. Without
2361 /// hardening this load, an attacker may speculatively write over the return address
2362 /// to steer speculation of the return to an attacker-controlled address. This is
2363 /// called Spectre v1.1 or Bounds Check Bypass Store (BCBS) and is described in
2364 /// this paper:
2365 /// https://people.csail.mit.edu/vlk/spectre11.pdf
2366 ///
2367 /// We can harden this by introducing an LFENCE that will delay any load of the
2368 /// return address until prior instructions have retired (and thus are not being
2369 /// speculated), or we can harden the address used by the implicit load: the
2370 /// stack pointer.
2371 ///
2372 /// If we are not using an LFENCE, hardening the stack pointer has an additional
2373 /// benefit: it allows us to pass the predicate state accumulated in this
2374 /// function back to the caller. In the absence of a BCBS attack on the return,
2375 /// the caller will typically be resumed and speculatively executed due to the
2376 /// Return Stack Buffer (RSB) prediction which is very accurate and has a high
2377 /// priority. It is possible that some code from the caller will be executed
2378 /// speculatively even during a BCBS-attacked return until the steering takes
2379 /// effect. Whenever this happens, the caller can recover the (poisoned)
2380 /// predicate state from the stack pointer and continue to harden loads.
2381 void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {
2382  MachineBasicBlock &MBB = *MI.getParent();
2383  DebugLoc Loc = MI.getDebugLoc();
2384  auto InsertPt = MI.getIterator();
2385 
2386  if (FenceCallAndRet)
2387  // No need to fence here as we'll fence at the return site itself. That
2388  // handles more cases than we can handle here.
2389  return;
2390 
2391  // Take our predicate state, shift it to the high 17 bits (so that we keep
2392  // pointers canonical) and merge it into RSP. This will allow the caller to
2393  // extract it when we return (speculatively).
2394  mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
2395 }
2396 
2397 /// Trace the predicate state through a call.
2398 ///
2399 /// There are several layers of this needed to handle the full complexity of
2400 /// calls.
2401 ///
2402 /// First, we need to send the predicate state into the called function. We do
2403 /// this by merging it into the high bits of the stack pointer.
2404 ///
2405 /// For tail calls, this is all we need to do.
2406 ///
2407 /// For calls where we might return and resume the control flow, we need to
2408 /// extract the predicate state from the high bits of the stack pointer after
2409 /// control returns from the called function.
2410 ///
2411 /// We also need to verify that we intended to return to this location in the
2412 /// code. An attacker might arrange for the processor to mispredict the return
2413 /// to this valid but incorrect return address in the program rather than the
2414 /// correct one. See the paper on this attack, called "ret2spec" by the
2415 /// researchers, here:
2416 /// https://christian-rossow.de/publications/ret2spec-ccs2018.pdf
2417 ///
2418 /// The way we verify that we returned to the correct location is by preserving
2419 /// the expected return address across the call. One technique involves taking
2420 /// advantage of the red-zone to load the return address from `-8(%rsp)` where it
2421 /// was left by the RET instruction when it popped `%rsp`. Alternatively, we can
2422 /// directly save the address into a register that will be preserved across the
2423 /// call. We compare this intended return address against the address
2424 /// immediately following the call (the observed return address). If these
2425 /// mismatch, we have detected misspeculation and can poison our predicate
2426 /// state.
2427 void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
2428  MachineInstr &MI) {
2429  MachineBasicBlock &MBB = *MI.getParent();
2430  MachineFunction &MF = *MBB.getParent();
2431  auto InsertPt = MI.getIterator();
2432  DebugLoc Loc = MI.getDebugLoc();
2433 
2434  if (FenceCallAndRet) {
2435  if (MI.isReturn())
2436  // Tail call, we don't return to this function.
2437  // FIXME: We should also handle noreturn calls.
2438  return;
2439 
2440  // We don't need to fence before the call because the function should fence
2441  // in its entry. However, we do need to fence after the call returns.
2442  // Fencing before the return doesn't correctly handle cases where the return
2443  // itself is mispredicted.
2444  BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
2445  ++NumInstsInserted;
2446  ++NumLFENCEsInserted;
2447  return;
2448  }
2449 
2450  // First, we transfer the predicate state into the called function by merging
2451  // it into the stack pointer. This will kill the current def of the state.
2452  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2453  mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
2454 
2455  // If this call is also a return, it is a tail call and we don't need anything
2456  // else to handle it so just return. Also, if there are no further
2457  // instructions and no successors, this call does not return so we can also
2458  // bail.
2459  if (MI.isReturn() || (std::next(InsertPt) == MBB.end() && MBB.succ_empty()))
2460  return;
2461 
2462  // Create a symbol to track the return address and attach it to the call
2463  // machine instruction. We will lower extra symbols attached to call
2464  // instructions as a label immediately following the call.
2465  MCSymbol *RetSymbol =
2466  MF.getContext().createTempSymbol("slh_ret_addr",
2467  /*AlwaysAddSuffix*/ true);
2468  MI.setPostInstrSymbol(MF, RetSymbol);
2469 
2470  const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
2471  unsigned ExpectedRetAddrReg = 0;
2472 
2473  // If we have no red zones or if the function returns twice (possibly without
2474  // using the `ret` instruction) like setjmp, we need to save the expected
2475  // return address prior to the call.
2476  if (!Subtarget->getFrameLowering()->has128ByteRedZone(MF) ||
2477  MF.exposesReturnsTwice()) {
2478  // If we don't have red zones, we need to compute the expected return
2479  // address prior to the call and store it in a register that lives across
2480  // the call.
2481  //
2482  // In some ways, this is doubly satisfying as a mitigation because it will
2483  // also successfully detect stack smashing bugs in some cases (typically,
2484  // when a callee-saved register is used and the callee doesn't push it onto
2485  // the stack). But that isn't our primary goal, so we only use it as
2486  // a fallback.
2487  //
2488  // FIXME: It isn't clear that this is reliable in the face of
2489  // rematerialization in the register allocator. We somehow need to force
2490  // that to not occur for this particular instruction, and instead to spill
2491  // or otherwise preserve the value computed *prior* to the call.
2492  //
2493  // FIXME: It is even less clear why MachineCSE can't just fold this when we
2494  // end up having to use identical instructions both before and after the
2495  // call to feed the comparison.
2496  ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2497  if (MF.getTarget().getCodeModel() == CodeModel::Small &&
2498  !Subtarget->isPositionIndependent()) {
2499  BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
2500  .addSym(RetSymbol);
2501  } else {
2502  BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)
2503  .addReg(/*Base*/ X86::RIP)
2504  .addImm(/*Scale*/ 1)
2505  .addReg(/*Index*/ 0)
2506  .addSym(RetSymbol)
2507  .addReg(/*Segment*/ 0);
2508  }
2509  }
2510 
2511  // Step past the call to handle when it returns.
2512  ++InsertPt;
2513 
2514  // If we didn't pre-compute the expected return address into a register, then
2515  // red zones are enabled and the return address is still available on the
2516  // stack immediately after the call. As the very first instruction, we load it
2517  // into a register.
2518  if (!ExpectedRetAddrReg) {
2519  ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2520  BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)
2521  .addReg(/*Base*/ X86::RSP)
2522  .addImm(/*Scale*/ 1)
2523  .addReg(/*Index*/ 0)
2524  .addImm(/*Displacement*/ -8) // The stack pointer has been popped, so
2525  // the return address is 8 bytes below it.
2526  .addReg(/*Segment*/ 0);
2527  }
2528 
2529  // Now we extract the callee's predicate state from the stack pointer.
2530  unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
2531 
2532  // Test the expected return address against our actual address. If we can
2533  // form this basic block's address as an immediate, this is easy. Otherwise
2534  // we compute it.
2535  if (MF.getTarget().getCodeModel() == CodeModel::Small &&
2536  !Subtarget->isPositionIndependent()) {
2537  // FIXME: Could we fold this with the load? It would require careful EFLAGS
2538  // management.
2539  BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))
2540  .addReg(ExpectedRetAddrReg, RegState::Kill)
2541  .addSym(RetSymbol);
2542  } else {
2543  Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
2544  BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
2545  .addReg(/*Base*/ X86::RIP)
2546  .addImm(/*Scale*/ 1)
2547  .addReg(/*Index*/ 0)
2548  .addSym(RetSymbol)
2549  .addReg(/*Segment*/ 0);
2550  BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))
2551  .addReg(ExpectedRetAddrReg, RegState::Kill)
2552  .addReg(ActualRetAddrReg, RegState::Kill);
2553  }
2554 
2555  // Now conditionally update the predicate state we just extracted if we ended
2556  // up at a different return address than expected.
2557  int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
2558  auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
2559 
2560  Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
2561  auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
2562  .addReg(NewStateReg, RegState::Kill)
2563  .addReg(PS->PoisonReg)
2564  .addImm(X86::COND_NE);
2565  CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
2566  ++NumInstsInserted;
2567  LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
2568 
2569  PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
2570 }
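// Putting the pieces together, the code emitted around a non-tail call is
// roughly the following (PIC-style addressing, no red zone, names illustrative):
//
//   shlq $47, %state
//   orq  %state, %rsp                      # hand the state to the callee
//   leaq .Lslh_ret_addr(%rip), %expected   # expected return address
//   callq callee
// .Lslh_ret_addr:
//   movq %rsp, %newstate
//   sarq $63, %newstate                    # recover the callee's state
//   leaq .Lslh_ret_addr(%rip), %actual
//   cmpq %actual, %expected
//   cmovneq %poison, %newstate             # mispredicted return: poison the state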
2571 
2572 /// An attacker may speculatively store over a value that is then speculatively
2573 /// loaded and used as the target of an indirect call or jump instruction. This
2574 /// is called Spectre v1.2 or Bounds Check Bypass Store (BCBS) and is described
2575 /// in this paper:
2576 /// https://people.csail.mit.edu/vlk/spectre11.pdf
2577 ///
2578 /// When this happens, the speculative execution of the call or jump will end up
2579 /// being steered to this attacker-controlled address. While most such loads
2580 /// will be adequately hardened already, we want to ensure that they are
2581 /// definitively treated as needing post-load hardening. While address hardening
2582 /// is sufficient to prevent secret data from leaking to the attacker, it may
2583 /// not be sufficient to prevent an attacker from steering speculative
2584 /// execution. We forcibly unfolded all relevant loads above and so will always
2585 /// have an opportunity to post-load harden here; we just need to scan for cases
2586 /// not already flagged and add them.
2587 void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
2588  MachineInstr &MI,
2589  SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
2590  switch (MI.getOpcode()) {
2591  case X86::FARCALL16m:
2592  case X86::FARCALL32m:
2593  case X86::FARCALL64:
2594  case X86::FARJMP16m:
2595  case X86::FARJMP32m:
2596  case X86::FARJMP64:
2597  // We don't need to harden either far calls or far jumps as they are
2598  // safe from Spectre.
2599  return;
2600 
2601  default:
2602  break;
2603  }
2604 
2605  // We should never see a loading instruction at this point, as those should
2606  // have been unfolded.
2607  assert(!MI.mayLoad() && "Found a lingering loading instruction!");
2608 
2609  // If the first operand isn't a register, this is a branch or call
2610  // instruction with an immediate operand which doesn't need to be hardened.
2611  if (!MI.getOperand(0).isReg())
2612  return;
2613 
2614  // For all of these, the target register is the first operand of the
2615  // instruction.
2616  auto &TargetOp = MI.getOperand(0);
2617  Register OldTargetReg = TargetOp.getReg();
2618 
2619  // Try to lookup a hardened version of this register. We retain a reference
2620  // here as we want to update the map to track any newly computed hardened
2621  // register.
2622  unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
2623 
2624  // If we don't have a hardened register yet, compute one. Otherwise, just use
2625  // the already hardened register.
2626  //
2627  // FIXME: It is a little suspect that we use partially hardened registers that
2628  // only feed addresses. The complexity of partial hardening with SHRX
2629  // continues to pile up. Should definitively measure its value and consider
2630  // eliminating it.
2631  if (!HardenedTargetReg)
2632  HardenedTargetReg = hardenValueInRegister(
2633  OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());
2634 
2635  // Set the target operand to the hardened register.
2636  TargetOp.setReg(HardenedTargetReg);
2637 
2638  ++NumCallsOrJumpsHardened;
2639 }
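// For example (sketch): after load unfolding, an indirect call such as
//
//   callq *%r11
//
// becomes, when %r11 has no hardened alias recorded yet:
//
//   orq   %state, %r11     # all-ones state redirects the target away from any
//                          # attacker-controlled address
//   callq *%r11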
2640 
2641 INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningPass, PASS_KEY,
2642  "X86 speculative load hardener", false, false)
2643 INITIALIZE_PASS_END(X86SpeculativeLoadHardeningPass, PASS_KEY,
2644  "X86 speculative load hardener", false, false)
2645 
2646 FunctionPass *llvm::createX86SpeculativeLoadHardeningPass() {
2647  return new X86SpeculativeLoadHardeningPass();
2648 }
MachineOperand * findRegisterUseOperand(Register Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterUseOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:656
MachineBasicBlock * getMBB() const
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Register getReg(unsigned Idx) const
Get the register for the operand index.
void set(unsigned Idx)
void push_back(const T &Elt)
Definition: SmallVector.h:211
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:384
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:179
unsigned Reg
bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
static cl::opt< bool > HardenInterprocedurally(PASS_KEY "-ip", cl::desc("Harden interprocedurally by passing our state in and out of " "functions in the high bits of the stack pointer."), cl::init(true), cl::Hidden)
static const TargetRegisterClass * getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII, unsigned Opcode)
Compute the register class for the unfolded load.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:323
unsigned second
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1165
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
void setIsDead(bool Val=true)
static cl::opt< bool > FenceCallAndRet(PASS_KEY "-fence-call-and-ret", cl::desc("Use a full speculation fence to harden both call and ret edges " "rather than a lighter weight mitigation."), cl::init(false), cl::Hidden)
static bool hasVulnerableLoad(MachineFunction &MF)
Helper to scan a function for loads vulnerable to misspeculation that we want to harden.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineSSAUpdater - This class updates SSA form for a set of virtual registers defined in multiple bl...
iterator_range< succ_iterator > successors()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
bool test(unsigned Idx) const
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:672
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:410
static MachineBasicBlock & splitEdge(MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount, MachineInstr *Br, MachineInstr *&UncondBr, const X86InstrInfo &TII)
static cl::opt< bool > HardenEdgesWithLFENCE(PASS_KEY "-lfence", cl::desc("Use LFENCE along each conditional edge to harden against speculative " "loads rather than conditional movs and poisoned pointers."), cl::init(false), cl::Hidden)
auto reverse(ContainerTy &&C, typename std::enable_if< has_rbegin< ContainerTy >::value >::type *=nullptr) -> decltype(make_range(C.rbegin(), C.rend()))
Definition: STLExtras.h:261
static cl::opt< bool > EnablePostLoadHardening(PASS_KEY "-post-load", cl::desc("Harden the value loaded *after* it is loaded by " "flushing the loaded bits to 1. This is hard to do " "in general but can be done easily for GPRs."), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSpeculativeLoadHardening("x86-speculative-load-hardening", cl::desc("Force enable speculative load hardening"), cl::init(false), cl::Hidden)
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
void clear()
Definition: SmallSet.h:218
static void canonicalizePHIOperands(MachineFunction &MF)
Removing duplicate PHI operands to leave the PHI in a canonical and predictable form.
static cl::opt< bool > HardenLoads(PASS_KEY "-loads", cl::desc("Sanitize loads from memory. When disable, no " "significant security is provided."), cl::init(true), cl::Hidden)
void dump() const
Definition: Pass.cpp:134
Memory SSA
Definition: MemorySSA.cpp:65
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
void setReg(Register Reg)
Change the register this operand corresponds to.
void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
void Initialize(unsigned V)
Initialize - Reset this object to get ready for a new set of SSA updates.
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:680
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand=false)
Return a cmov opcode for the given register size in bytes, and operand type.
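To make the entry above concrete, here is a small illustration (not code from this pass; the helper name is invented) of selecting a CMOV opcode for a full 8-byte GPR:

#include "X86InstrInfo.h"

// Illustration only: RegBytes=8 selects the 64-bit register-register form
// (X86::CMOV64rr); HasMemoryOperand=true would select the memory-operand form.
static unsigned cmovOpcodeFor64BitGPR() {
  return llvm::X86::getCMovOpcode(/*RegBytes=*/8, /*HasMemoryOperand=*/false);
}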
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:646
MCContext & getContext() const
#define PASS_KEY
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
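A minimal sketch of the MachineInstrBuilder pattern these entries describe, assuming an X86 target; the function, opcode choice, and register names are placeholders rather than code taken from this pass.

#include "X86InstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

// Placeholder helper: OR a (hypothetical) predicate-state register into a
// pointer register, marking the pointer operand as killed.
static void emitOrStateIntoPtr(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator InsertPt,
                               const DebugLoc &Loc, const X86InstrInfo &TII,
                               Register DstReg, Register PtrReg,
                               Register StateReg) {
  BuildMI(MBB, InsertPt, Loc, TII.get(X86::OR64rr), DstReg)
      .addReg(PtrReg, RegState::Kill)
      .addReg(StateReg);
}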
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
MCSymbol * createTempSymbol(bool CanBeUnnamed=true)
Create and return a new assembler temporary symbol with a unique but unspecified name.
Definition: MCContext.cpp:225
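The createTempSymbol and setPostInstrSymbol entries combine as sketched below; this is an illustrative pattern under the assumption of a call instruction to label, and the helper name is invented.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"

using namespace llvm;

// Create an assembler temporary and ask the emitter to place it immediately
// after CallMI, so later code can reference the post-call address.
static MCSymbol *labelAddressAfterCall(MachineFunction &MF,
                                       MachineInstr &CallMI) {
  MCSymbol *RetSym = MF.getContext().createTempSymbol();
  CallMI.setPostInstrSymbol(MF, RetSym);
  return RetSym;
}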
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
void setMBB(MachineBasicBlock *MBB)
Represent the analysis usage information of a pass.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1172
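A self-contained example of the range helpers referenced here (any_of, plus the reverse() adaptor from the earlier entry); the values and function names are arbitrary.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

static bool containsNegative(const SmallVector<int, 8> &Vals) {
  // any_of() takes a range plus a predicate instead of begin()/end().
  return any_of(Vals, [](int V) { return V < 0; });
}

static int lastValueOrZero(const SmallVector<int, 8> &Vals) {
  // reverse() adapts the range for back-to-front iteration.
  for (int V : reverse(Vals))
    return V;
  return 0;
}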
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:284
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
unsigned getOperandBias(const MCInstrDesc &Desc)
getOperandBias - compute whether all of the def operands are repeated in the uses and therefore shoul...
Definition: X86BaseInfo.h:721
self_iterator getIterator()
Definition: ilist_node.h:81
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:180
iterator_range< pred_iterator > predecessors()
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MachineOperand * findRegisterDefOperand(Register Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
void setIsKill(bool Val=true)
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1095
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
CondCode getCondFromBranch(const MachineInstr &MI)
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:297
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:417
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false...
Definition: SmallPtrSet.h:377
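Taken together, the SmallPtrSet entries (insert, count, erase) support the usual visited-set pattern; the sketch below is illustrative and not from this file.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"

using namespace llvm;

// insert() returns {iterator, bool}; the bool is false when the block was
// already present, which makes "visit each block once" a one-liner.
static bool markVisited(SmallPtrSet<MachineBasicBlock *, 16> &Visited,
                        MachineBasicBlock *MBB) {
  return Visited.insert(MBB).second;
}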
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g. turning COND_E to COND_NE.
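A short sketch of how the two condition-code helpers above combine; the wrapper function is hypothetical.

#include "X86InstrInfo.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Read the condition encoded in a JCC branch (COND_INVALID for anything
// else) and return the condition governing the opposite edge.
static X86::CondCode invertedBranchCondition(const MachineInstr &CondBr) {
  X86::CondCode CC = X86::getCondFromBranch(CondBr);
  return CC == X86::COND_INVALID ? CC : X86::GetOppositeBranchCondition(CC);
}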
static bool isDataInvariant(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value o...
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:374
const X86RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
Definition: X86InstrInfo.h:151
FunctionPass * createX86SpeculativeLoadHardeningPass()
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
const Function & getFunction() const
Return the LLVM function that this machine code represents.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1332
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:585
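For example, Log2_32(8) yields 3; a one-line sketch (the wrapper name is made up):

#include "llvm/Support/MathExtras.h"

// Log2_32(8) == 3; Log2_32(0) wraps to UINT32_MAX (the "-1" noted above).
static unsigned scaleShift(unsigned Scale) { return llvm::Log2_32(Scale); }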
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
CodeModel::Model getCodeModel() const
Returns the code model.
static cl::opt< bool > HardenIndirectCallsAndJumps(PASS_KEY "-indirect", cl::desc("Harden indirect calls and jumps against using speculatively " "stored attacker controlled addresses. This is designed to " "mitigate Spectre v1.2 style attacks."), cl::init(true), cl::Hidden)
void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New)
Replace successor OLD with NEW and update probability info.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:255
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Representation of each machine instruction.
Definition: MachineInstr.h:63
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const TargetRegisterInfo &TRI)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool isEHPad() const
Returns true if the block is a landing pad.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use...
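An example of invoking the verifier from a machine pass; the banner text here is arbitrary.

#include "llvm/CodeGen/MachineFunction.h"

// Run the machine code verifier; by default it aborts if the function is broken.
static void checkMF(const llvm::MachineFunction &MF) {
  MF.verify(/*p=*/nullptr, /*Banner=*/"After speculative load hardening");
}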
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
unsigned GetValueInMiddleOfBlock(MachineBasicBlock *BB)
GetValueInMiddleOfBlock - Construct SSA form, materializing a value that is live in the middle of the...
void AddAvailableValue(MachineBasicBlock *BB, unsigned V)
AddAvailableValue - Indicate that a rewritten value is available at the end of the specified block wi...
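The MachineSSAUpdater entries (Initialize, AddAvailableValue, GetValueInMiddleOfBlock) are typically used together; a minimal sketch with made-up names and a single defining block follows.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"

using namespace llvm;

// Given a virtual register DefReg defined in DefMBB, return a register that
// holds its value inside UseMBB, inserting PHI nodes where control flow
// merges. Purely illustrative; the real pass tracks a def per block.
static unsigned valueInBlock(MachineFunction &MF, MachineBasicBlock &DefMBB,
                             unsigned DefReg, MachineBasicBlock &UseMBB) {
  MachineSSAUpdater SSAUpdater(MF);
  SSAUpdater.Initialize(DefReg);
  SSAUpdater.AddAvailableValue(&DefMBB, DefReg);
  return SSAUpdater.GetValueInMiddleOfBlock(&UseMBB);
}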
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:145
void splitSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New, bool NormalizeSuccProbs=false)
Split the old successor into old plus new and update the probability info.
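The successor-editing calls listed here (addSuccessor, replaceSuccessor, splitSuccessor, normalizeSuccProbs) are what an edge-splitting routine builds on; below is a simplified, hypothetical rewiring sketch that deliberately ignores branch instructions and PHI updates.

#include <iterator>
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"

using namespace llvm;

// Insert an empty block on the Pred -> Succ edge at the CFG level only.
static MachineBasicBlock &insertBlockOnEdge(MachineFunction &MF,
                                            MachineBasicBlock &Pred,
                                            MachineBasicBlock &Succ) {
  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();
  // Keep layout simple: place the new block right after Pred.
  MF.insert(std::next(Pred.getIterator()), &NewMBB);
  // Pred's edge (and its probability) now targets NewMBB instead of Succ.
  Pred.replaceSuccessor(&Succ, &NewMBB);
  // NewMBB lists Succ as its sole successor; the actual branch or
  // fallthrough arrangement is left to the caller.
  NewMBB.addSuccessor(&Succ);
  return NewMBB;
}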
bool isReg() const
isReg - Tests if this is a MO_Register operand.
X86 speculative load hardener
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:830
static MachineOperand CreateMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:69
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:273
INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningPass, PASS_KEY, "X86 speculative load hardener", false, false) INITIALIZE_PASS_END(X86SpeculativeLoadHardeningPass
static bool isDataInvariantLoad(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value l...
iterator SkipPHIsLabelsAndDebug(iterator I)
Return the first instruction in MBB after I that is not a PHI, label or debug.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
Register getReg() const
getReg - Returns the register number.
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:415
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const override
getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new instruction after load / store ar...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
bool isImplicit() const
int getMemoryOperandNo(uint64_t TSFlags)
getMemoryOperandNo - The function returns the MCInst operand # for the first field of the memory oper...
Definition: X86BaseInfo.h:761
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:164