Bug Summary

File: lib/Target/X86/X86SpeculativeLoadHardening.cpp
Warning: line 2246, column 10
The right operand of '==' is a garbage value due to array index out of bounds
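
For readers unfamiliar with this checker: the analyzer is reporting that the value on the right-hand side of an '==' was read from an array element past the end of the array, so the comparison consumes an uninitialized ("garbage") value. Below is a minimal C++ sketch of this bug class; it assumes nothing about the actual code at line 2246 (which lies outside this excerpt), and all names are hypothetical:

  // Hypothetical reduction of the warning class, not the code at line 2246.
  int Values[4];                 // elements uninitialized; valid indices 0..3
  int Idx = 4;                   // out-of-bounds index
  bool Eq = (42 == Values[Idx]); // right operand of '==' is a garbage value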

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86SpeculativeLoadHardening.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/X86 -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/X86 -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/X86/X86SpeculativeLoadHardening.cpp -faddrsig
//====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Provide a pass which mitigates speculative execution attacks which operate
/// by speculating incorrectly past some predicate (a type check, bounds check,
/// or other condition) to reach a load with invalid inputs and leak the data
/// accessed by that load using a side channel out of the speculative domain.
///
/// For details on the attacks, see the first variant in both the Project Zero
/// writeup and the Spectre paper:
/// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
/// https://spectreattack.com/spectre.pdf
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-slh"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
STATISTIC(NumAddrRegsHardened,
          "Number of address mode used registers hardened");
STATISTIC(NumPostLoadRegsHardened,
          "Number of post-load register values hardened");
STATISTIC(NumCallsOrJumpsHardened,
          "Number of calls or jumps requiring extra hardening");
STATISTIC(NumInstsInserted, "Number of instructions inserted");
STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");

static cl::opt<bool> EnableSpeculativeLoadHardening(
    "x86-speculative-load-hardening",
    cl::desc("Force enable speculative load hardening"), cl::init(false),
    cl::Hidden);

static cl::opt<bool> HardenEdgesWithLFENCE(
    PASS_KEY "-lfence",
    cl::desc(
        "Use LFENCE along each conditional edge to harden against speculative "
        "loads rather than conditional movs and poisoned pointers."),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EnablePostLoadHardening(
    PASS_KEY "-post-load",
    cl::desc("Harden the value loaded *after* it is loaded by "
             "flushing the loaded bits to 1. This is hard to do "
             "in general but can be done easily for GPRs."),
    cl::init(true), cl::Hidden);

static cl::opt<bool> FenceCallAndRet(
    PASS_KEY "-fence-call-and-ret",
    cl::desc("Use a full speculation fence to harden both call and ret edges "
             "rather than a lighter weight mitigation."),
    cl::init(false), cl::Hidden);

static cl::opt<bool> HardenInterprocedurally(
    PASS_KEY "-ip",
    cl::desc("Harden interprocedurally by passing our state in and out of "
             "functions in the high bits of the stack pointer."),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    HardenLoads(PASS_KEY "-loads",
                cl::desc("Sanitize loads from memory. When disabled, no "
                         "significant security is provided."),
                cl::init(true), cl::Hidden);

static cl::opt<bool> HardenIndirectCallsAndJumps(
    PASS_KEY "-indirect",
    cl::desc("Harden indirect calls and jumps against using speculatively "
             "stored attacker controlled addresses. This is designed to "
             "mitigate Spectre v1.2 style attacks."),
    cl::init(true), cl::Hidden);

namespace llvm {

void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);

} // end namespace llvm

namespace {

class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
public:
  X86SpeculativeLoadHardeningPass() : MachineFunctionPass(ID) {
    initializeX86SpeculativeLoadHardeningPassPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "X86 speculative load hardening";
  }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  /// The information about a block's conditional terminators needed to trace
  /// our predicate state through the exiting edges.
  struct BlockCondInfo {
    MachineBasicBlock *MBB;

    // We mostly have one conditional branch, and in extremely rare cases have
    // two. Three and more are so rare as to be unimportant for compile time.
    SmallVector<MachineInstr *, 2> CondBrs;

    MachineInstr *UncondBr;
  };

  /// Manages the predicate state traced through the program.
  struct PredState {
    unsigned InitialReg;
    unsigned PoisonReg;

    const TargetRegisterClass *RC;
    MachineSSAUpdater SSA;

    PredState(MachineFunction &MF, const TargetRegisterClass *RC)
        : RC(RC), SSA(MF) {}
  };

  const X86Subtarget *Subtarget;
  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const TargetRegisterInfo *TRI;

  Optional<PredState> PS;

  void hardenEdgesWithLFENCE(MachineFunction &MF);

  SmallVector<BlockCondInfo, 16> collectBlockCondInfo(MachineFunction &MF);

  SmallVector<MachineInstr *, 16>
  tracePredStateThroughCFG(MachineFunction &MF, ArrayRef<BlockCondInfo> Infos);

  void unfoldCallAndJumpLoads(MachineFunction &MF);

  SmallVector<MachineInstr *, 16>
  tracePredStateThroughIndirectBranches(MachineFunction &MF);

  void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);

  unsigned saveEFLAGS(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator InsertPt, DebugLoc Loc);
  void restoreEFLAGS(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
                     unsigned OFReg);

  void mergePredStateIntoSP(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
                            unsigned PredStateReg);
  unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertPt,
                                  DebugLoc Loc);

  void
  hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
                 MachineOperand &IndexMO,
                 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
  MachineInstr *
  sinkPostLoadHardenedInst(MachineInstr &MI,
                           SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
  bool canHardenRegister(unsigned Reg);
  unsigned hardenValueInRegister(unsigned Reg, MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator InsertPt,
                                 DebugLoc Loc);
  unsigned hardenPostLoad(MachineInstr &MI);
  void hardenReturnInstr(MachineInstr &MI);
  void tracePredStateThroughCall(MachineInstr &MI);
  void hardenIndirectCallOrJumpInstr(
      MachineInstr &MI,
      SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
};

} // end anonymous namespace

char X86SpeculativeLoadHardeningPass::ID = 0;

void X86SpeculativeLoadHardeningPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
}

static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
                                    MachineBasicBlock &Succ, int SuccCount,
                                    MachineInstr *Br, MachineInstr *&UncondBr,
                                    const X86InstrInfo &TII) {
  assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");

  MachineFunction &MF = *MBB.getParent();

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // We have to insert the new block immediately after the current one as we
  // don't know what layout-successor relationships the successor has and we
  // may not be able to (and generally don't want to) try to fix those up.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Update the branch instruction if necessary.
  if (Br) {
    assert(Br->getOperand(0).getMBB() == &Succ &&
           "Didn't start with the right target!");
    Br->getOperand(0).setMBB(&NewMBB);

    // If this successor was reached through a branch rather than fallthrough,
    // we might have *broken* fallthrough and so need to inject a new
    // unconditional branch.
    if (!UncondBr) {
      MachineBasicBlock &OldLayoutSucc =
          *std::next(MachineFunction::iterator(&NewMBB));
      assert(MBB.isSuccessor(&OldLayoutSucc) &&
             "Without an unconditional branch, the old layout successor should "
             "be an actual successor!");
      auto BrBuilder =
          BuildMI(&MBB, DebugLoc(), TII.get(X86::JMP_1)).addMBB(&OldLayoutSucc);
      // Update the unconditional branch now that we've added one.
      UncondBr = &*BrBuilder;
    }

    // Insert unconditional "jump Succ" instruction in the new block if
    // necessary.
    if (!NewMBB.isLayoutSuccessor(&Succ)) {
      SmallVector<MachineOperand, 4> Cond;
      TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
    }
  } else {
    assert(!UncondBr &&
           "Cannot have a branchless successor and an unconditional branch!");
    assert(NewMBB.isLayoutSuccessor(&Succ) &&
           "A non-branch successor must have been a layout successor before "
           "and now is a layout successor of the new block.");
  }

  // If this is the only edge to the successor, we can just replace it in the
  // CFG. Otherwise we need to add a new entry in the CFG for the new
  // successor.
  if (SuccCount == 1) {
    MBB.replaceSuccessor(&Succ, &NewMBB);
  } else {
    MBB.splitSuccessor(&Succ, &NewMBB);
  }

  // Hook up the edge from the new basic block to the old successor in the CFG.
  NewMBB.addSuccessor(&Succ);

  // Fix PHI nodes in Succ so they refer to NewMBB instead of MBB.
  for (MachineInstr &MI : Succ) {
    if (!MI.isPHI())
      break;
    for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
         OpIdx += 2) {
      MachineOperand &OpV = MI.getOperand(OpIdx);
      MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
      assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
      if (OpMBB.getMBB() != &MBB)
        continue;

      // If this is the last edge to the successor, just replace MBB in the
      // PHI.
      if (SuccCount == 1) {
        OpMBB.setMBB(&NewMBB);
        break;
      }

      // Otherwise, append a new pair of operands for the new incoming edge.
      MI.addOperand(MF, OpV);
      MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
      break;
    }
  }

  // Inherit live-ins from the successor.
  for (auto &LI : Succ.liveins())
    NewMBB.addLiveIn(LI);

  LLVM_DEBUG(dbgs() << "  Split edge from '" << MBB.getName() << "' to '"
                    << Succ.getName() << "'.\n");
  return NewMBB;
}

/// Remove duplicate PHI operands to leave the PHI in a canonical and
/// predictable form.
///
/// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
/// isn't what you might expect. We may have multiple entries in PHI nodes for
/// a single predecessor. This makes CFG-updating extremely complex, so here we
/// simplify all PHI nodes to a model even simpler than the IR's model: exactly
/// one entry per predecessor, regardless of how many edges there are.
static void canonicalizePHIOperands(MachineFunction &MF) {
  SmallPtrSet<MachineBasicBlock *, 4> Preds;
  SmallVector<int, 4> DupIndices;
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!MI.isPHI())
        break;

      // First we scan the operands of the PHI looking for duplicate entries
      // for a particular predecessor. We retain the operand index of each
      // duplicate entry found.
      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2)
        if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
          DupIndices.push_back(OpIdx);

      // Now walk the duplicate indices, removing both the block and value
      // operands. Note that these are stored as a vector, making this
      // element-wise removal potentially quadratic.
      //
      // FIXME: It is really frustrating that we have to use a quadratic
      // removal algorithm here. There should be a better way, but the use-def
      // updates required make that impossible using the public API.
      //
      // Note that we have to process these backwards so that we don't
      // invalidate other indices with each removal.
      while (!DupIndices.empty()) {
        int OpIdx = DupIndices.pop_back_val();
        // Remove both the block and value operand, again in reverse order to
        // preserve indices.
        MI.RemoveOperand(OpIdx + 1);
        MI.RemoveOperand(OpIdx);
      }

      Preds.clear();
    }
}
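
As an illustration of the canonical form this function establishes (schematic MIR with hypothetical register and block names, not taken from any real function), a PHI that lists the same predecessor twice collapses to exactly one entry per predecessor:

  ; Before canonicalization: %bb.1 appears twice among the incoming blocks.
  %v:gr64 = PHI %a, %bb.1, %a, %bb.1, %b, %bb.2
  ; After canonicalization: exactly one entry per predecessor.
  %v:gr64 = PHI %a, %bb.1, %b, %bb.2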

/// Helper to scan a function for loads vulnerable to misspeculation that we
/// want to harden.
///
/// We use this to avoid making changes to functions where there is nothing we
/// need to do to harden against misspeculation.
static bool hasVulnerableLoad(MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // Loads within this basic block after an LFENCE are not at risk of
      // speculatively executing with invalid predicates from prior control
      // flow. So break out of this block but continue scanning the function.
      if (MI.getOpcode() == X86::LFENCE)
        break;

      // Looking for loads only.
      if (!MI.mayLoad())
        continue;

      // An MFENCE is modeled as a load but isn't vulnerable to misspeculation.
      if (MI.getOpcode() == X86::MFENCE)
        continue;

      // We found a load.
      return true;
    }
  }

  // No loads found.
  return false;
}

bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
    MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << " **********\n");

  // Only run if this pass is forced enabled or we detect the relevant function
  // attribute requesting SLH.
  if (!EnableSpeculativeLoadHardening &&
      !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    return false;

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();

  // FIXME: Support for 32-bit.
  PS.emplace(MF, &X86::GR64_NOSPRegClass);

  if (MF.begin() == MF.end())
    // Nothing to do for a degenerate empty function...
    return false;

  // We support an alternative hardening technique based on a debug flag.
  if (HardenEdgesWithLFENCE) {
    hardenEdgesWithLFENCE(MF);
    return true;
  }

  // Create a dummy debug loc to use for all the generated code here.
  DebugLoc Loc;

  MachineBasicBlock &Entry = *MF.begin();
  auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());

  // Do a quick scan to see if we have any checkable loads.
  bool HasVulnerableLoad = hasVulnerableLoad(MF);

  // See if we have any conditional branching blocks that we will need to trace
  // predicate state through.
  SmallVector<BlockCondInfo, 16> Infos = collectBlockCondInfo(MF);

  // If we have no interesting conditions or loads, nothing to do here.
  if (!HasVulnerableLoad && Infos.empty())
    return true;

  // The poison value is required to be an all-ones value for many aspects of
  // this mitigation.
  const int PoisonVal = -1;
  PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
      .addImm(PoisonVal);
  ++NumInstsInserted;

  // If we have loads being hardened and we've asked for call and ret edges to
  // get a full fence-based mitigation, inject that fence.
  if (HasVulnerableLoad && FenceCallAndRet) {
    // We need to insert an LFENCE at the start of the function to suspend any
    // incoming misspeculation from the caller. This helps two-fold: the caller
    // may not have been protected as this code has been, and this code gets to
    // not take any specific action to protect across calls.
    // FIXME: We could skip this for functions which unconditionally return
    // a constant.
    BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
  }

  // If we guarded the entry with an LFENCE and have no conditionals to protect
  // in blocks, then we're done.
  if (FenceCallAndRet && Infos.empty())
    // We may have changed the function's code at this point to insert fences.
    return true;

  if (HardenInterprocedurally && !FenceCallAndRet) {
    // Set up the predicate state by extracting it from the incoming stack
    // pointer so we pick up any misspeculation in our caller.
    PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
  } else {
    // Otherwise, just build the predicate state itself by zeroing a register
    // as we don't need any initial state.
    PS->InitialReg = MRI->createVirtualRegister(PS->RC);
    auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64r0),
                         PS->InitialReg);
    ++NumInstsInserted;
    MachineOperand *ZeroEFLAGSDefOp =
        ZeroI->findRegisterDefOperand(X86::EFLAGS);
    assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
           "Must have an implicit def of EFLAGS!");
    ZeroEFLAGSDefOp->setIsDead(true);
  }

  // We're going to need to trace predicate state throughout the function's
  // CFG. Prepare for this by setting up our initial state of PHIs with unique
  // predecessor entries and all the initial predicate state.
  canonicalizePHIOperands(MF);

  // Track the updated values in an SSA updater to rewrite into SSA form at the
  // end.
  PS->SSA.Initialize(PS->InitialReg);
  PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);

  // Trace through the CFG.
  auto CMovs = tracePredStateThroughCFG(MF, Infos);

  // We may also enter basic blocks in this function via exception handling
  // control flow. Here, if we are hardening interprocedurally, we need to
  // re-capture the predicate state from the throwing code. In the Itanium ABI,
  // the throw will always look like a call to __cxa_throw and will have the
  // predicate state in the stack pointer, so extract fresh predicate state
  // from the stack pointer and make it available in SSA.
  // FIXME: Handle non-Itanium ABI EH models.
  if (HardenInterprocedurally) {
    for (MachineBasicBlock &MBB : MF) {
      assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
      assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
      assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");
      if (!MBB.isEHPad())
        continue;
      PS->SSA.AddAvailableValue(
          &MBB,
          extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));
    }
  }

  if (HardenIndirectCallsAndJumps) {
    // If we are going to harden calls and jumps we need to unfold their memory
    // operands.
    unfoldCallAndJumpLoads(MF);

    // Then we trace predicate state through the indirect branches.
    auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
    CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());
  }

  // Now that we have the predicate state available at the start of each block
  // in the CFG, trace it through each block, hardening vulnerable instructions
  // as we go.
  tracePredStateThroughBlocksAndHarden(MF);

  // Now rewrite all the uses of the pred state using the SSA updater to insert
  // PHIs connecting the state between blocks along the CFG edges.
  for (MachineInstr *CMovI : CMovs)
    for (MachineOperand &Op : CMovI->operands()) {
      if (!Op.isReg() || Op.getReg() != PS->InitialReg)
        continue;

      PS->SSA.RewriteUse(Op);
    }

  LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
             dbgs() << "\n"; MF.verify(this));
  return true;
}
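
The interprocedural scheme used above keeps the predicate state in the high bits of the stack pointer across calls, returns, and EH edges (see extractPredStateFromSP and mergePredStateIntoSP). Because user-space stack pointers on x86-64 are canonical, the emitted pattern looks roughly like the following schematic assembly; this is a sketch of the design rather than a dump from this build, and the scratch register is hypothetical:

  # Extract: arithmetic-shifting RSP right by 63 yields all-zeros when no
  # misspeculation has been observed and all-ones when the state is poisoned.
  movq %rsp, %rcx
  sarq $63, %rcx        # %rcx = predicate state (0 or -1)

  # Merge before a call or return: shift the state up into the high bits of
  # RSP so the callee (or caller) can recover it the same way.
  shlq $47, %rcx
  orq  %rcx, %rsp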

/// Implements the naive hardening approach of putting an LFENCE after every
/// potentially mis-predicted control flow construct.
///
/// We include this as an alternative mostly for the purpose of comparison. The
/// performance impact of this is expected to be extremely severe and not
/// practical for any real-world users.
void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(
    MachineFunction &MF) {
  // First, we scan the function looking for blocks that are reached along
  // edges that we might want to harden.
  SmallSetVector<MachineBasicBlock *, 8> Blocks;
  for (MachineBasicBlock &MBB : MF) {
    // If there are no or only one successor, nothing to do here.
    if (MBB.succ_size() <= 1)
      continue;

    // Skip blocks unless their terminators start with a branch. Other
    // terminators don't seem interesting for guarding against misspeculation.
    auto TermIt = MBB.getFirstTerminator();
    if (TermIt == MBB.end() || !TermIt->isBranch())
      continue;

    // Add all the non-EH-pad successors to the blocks we want to harden. We
    // skip EH pads because there isn't really a condition of interest on
    // entering.
    for (MachineBasicBlock *SuccMBB : MBB.successors())
      if (!SuccMBB->isEHPad())
        Blocks.insert(SuccMBB);
  }

  for (MachineBasicBlock *MBB : Blocks) {
    auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
    BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
  }
}

SmallVector<X86SpeculativeLoadHardeningPass::BlockCondInfo, 16>
X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {
  SmallVector<BlockCondInfo, 16> Infos;

  // Walk the function and build up a summary for each block's conditions that
  // we need to trace through.
  for (MachineBasicBlock &MBB : MF) {
    // If there are no or only one successor, nothing to do here.
    if (MBB.succ_size() <= 1)
      continue;

    // We want to reliably handle any conditional branch terminators in the
    // MBB, so we manually analyze the branch. We can handle all of the
    // permutations here, including ones that analyzeBranch cannot.
    //
    // The approach is to walk backwards across the terminators, resetting at
    // any unconditional non-indirect branch, and track all conditional edges
    // to basic blocks as well as the fallthrough or unconditional successor
    // edge. For each conditional edge, we track the target and the opposite
    // condition code in order to inject a "no-op" cmov into that successor
    // that will harden the predicate. For the fallthrough/unconditional
    // edge, we inject a separate cmov for each conditional branch with
    // matching condition codes. This effectively implements an "and" of the
    // condition flags, even if there isn't a single condition flag that would
    // directly implement that. We don't bother trying to optimize either of
    // these cases because if such an optimization is possible, LLVM should
    // have optimized the conditional *branches* in that way already to reduce
    // instruction count. This late, we simply assume the minimal number of
    // branch instructions is being emitted and use that to guide our cmov
    // insertion.

    BlockCondInfo Info = {&MBB, {}, nullptr};

    // Now walk backwards through the terminators and build up successors they
    // reach and the conditions.
    for (MachineInstr &MI : llvm::reverse(MBB)) {
      // Once we've handled all the terminators, we're done.
      if (!MI.isTerminator())
        break;

      // If we see a non-branch terminator, we can't handle anything so bail.
      if (!MI.isBranch()) {
        Info.CondBrs.clear();
        break;
      }

      // If we see an unconditional branch, reset our state, clear any
      // fallthrough, and set this as the "else" successor.
      if (MI.getOpcode() == X86::JMP_1) {
        Info.CondBrs.clear();
        Info.UncondBr = &MI;
        continue;
      }

      // If we get an invalid condition, we have an indirect branch or some
      // other unanalyzable "fallthrough" case. We model this as a nullptr for
      // the destination so we can still guard any conditional successors.
      // Consider code sequences like:
      // ```
      //   jCC L1
      //   jmpq *%rax
      // ```
      // We still want to harden the edge to `L1`.
      if (X86::getCondFromBranchOpc(MI.getOpcode()) == X86::COND_INVALID) {
        Info.CondBrs.clear();
        Info.UncondBr = &MI;
        continue;
      }

      // We have a vanilla conditional branch, add it to our list.
      Info.CondBrs.push_back(&MI);
    }
    if (Info.CondBrs.empty()) {
      ++NumBranchesUntraced;
      LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
                 MBB.dump());
      continue;
    }

    Infos.push_back(Info);
  }

  return Infos;
}

/// Trace the predicate state through the CFG, instrumenting each conditional
/// branch such that misspeculation through an edge will poison the predicate
/// state.
///
/// Returns the list of inserted CMov instructions so that they can have their
/// uses of the predicate state rewritten into proper SSA form once it is
/// complete.
SmallVector<MachineInstr *, 16>
X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
    MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
  // Collect the inserted cmov instructions so we can rewrite their uses of the
  // predicate state into SSA form.
  SmallVector<MachineInstr *, 16> CMovs;

  // Now walk all of the basic blocks looking for ones that end in conditional
  // jumps where we need to update this register along each edge.
  for (const BlockCondInfo &Info : Infos) {
    MachineBasicBlock &MBB = *Info.MBB;
    const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
    MachineInstr *UncondBr = Info.UncondBr;

    LLVM_DEBUG(dbgs() << "Tracing predicate through block: " << MBB.getName()
                      << "\n");
    ++NumCondBranchesTraced;

    // Compute the non-conditional successor as either the target of any
    // unconditional branch or the layout successor.
    MachineBasicBlock *UncondSucc =
        UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
                        ? UncondBr->getOperand(0).getMBB()
                        : nullptr)
                 : &*std::next(MachineFunction::iterator(&MBB));

    // Count how many edges there are to any given successor.
    SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
    if (UncondSucc)
      ++SuccCounts[UncondSucc];
    for (auto *CondBr : CondBrs)
      ++SuccCounts[CondBr->getOperand(0).getMBB()];

    // A lambda to insert cmov instructions into a block checking all of the
    // condition codes in a sequence.
    auto BuildCheckingBlockForSuccAndConds =
        [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
            MachineInstr *Br, MachineInstr *&UncondBr,
            ArrayRef<X86::CondCode> Conds) {
          // First, we split the edge to insert the checking block into a safe
          // location.
          auto &CheckingMBB =
              (SuccCount == 1 && Succ.pred_size() == 1)
                  ? Succ
                  : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);

          bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
          if (!LiveEFLAGS)
            CheckingMBB.addLiveIn(X86::EFLAGS);

          // Now insert the cmovs to implement the checks.
          auto InsertPt = CheckingMBB.begin();
          assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
                 "Should never have a PHI in the initial checking block as it "
                 "always has a single predecessor!");

          // We will wire each cmov to each other, but need to start with the
          // incoming pred state.
          unsigned CurStateReg = PS->InitialReg;

          for (X86::CondCode Cond : Conds) {
            int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
            auto CMovOp = X86::getCMovFromCond(Cond, PredStateSizeInBytes);

            unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
            // Note that we intentionally use an empty debug location so that
            // this picks up the preceding location.
            auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
                                 TII->get(CMovOp), UpdatedStateReg)
                             .addReg(CurStateReg)
                             .addReg(PS->PoisonReg);
            // If this is the last cmov and the EFLAGS weren't originally
            // live-in, mark them as killed.
            if (!LiveEFLAGS && Cond == Conds.back())
              CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);

            ++NumInstsInserted;
            LLVM_DEBUG(dbgs() << "  Inserting cmov: "; CMovI->dump();
                       dbgs() << "\n");

            // The first one of the cmovs will be using the top level
            // `PredStateReg` and need to get rewritten into SSA form.
            if (CurStateReg == PS->InitialReg)
              CMovs.push_back(&*CMovI);

            // The next cmov should start from this one's def.
            CurStateReg = UpdatedStateReg;
          }

          // And put the last one into the available values for SSA form of our
          // predicate state.
          PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
        };

    std::vector<X86::CondCode> UncondCodeSeq;
    for (auto *CondBr : CondBrs) {
      MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
      int &SuccCount = SuccCounts[&Succ];

      X86::CondCode Cond = X86::getCondFromBranchOpc(CondBr->getOpcode());
      X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
      UncondCodeSeq.push_back(Cond);

      BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
                                        {InvCond});

      // Decrement the successor count now that we've split one of the edges.
      // We need to keep the count of edges to the successor accurate in order
      // to know above when to *replace* the successor in the CFG vs. just
      // adding the new successor.
      --SuccCount;
    }

    // Since we may have split edges and changed the number of successors,
    // normalize the probabilities. This avoids doing it each time we split an
    // edge.
    MBB.normalizeSuccProbs();

    // Finally, we need to insert cmovs into the "fallthrough" edge. Here, we
    // need to intersect the other condition codes. We can do this by just
    // doing a cmov for each one.
    if (!UncondSucc)
      // If we have no fallthrough to protect (perhaps it is an indirect
      // jump?) just skip this and continue.
      continue;

    assert(SuccCounts[UncondSucc] == 1 &&
           "We should never have more than one edge to the unconditional "
           "successor at this point because every other edge must have been "
           "split above!");

    // Sort and unique the codes to minimize them.
    llvm::sort(UncondCodeSeq);
    UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
                        UncondCodeSeq.end());

    // Build a checking version of the successor.
    BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
                                      UncondBr, UncondBr, UncondCodeSeq);
  }

  return CMovs;
}
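
Concretely, for a block ending in `jne .Ltarget`, the checking blocks built by the lambda above poison the state with a cmov whose condition contradicts the edge taken. A schematic of the emitted pattern follows, with %rax standing in for the virtual predicate-state register and %r8 for the all-ones poison register (both hypothetical stand-ins for virtual registers):

    testl %edi, %edi
    jne   .Ltarget
    # Fallthrough edge: correct execution implies ZF is set, so a clear ZF
    # here means we misspeculated; poison the state.
    cmovneq %r8, %rax
    ...
  .Ltarget:
    # Taken edge: correct execution implies ZF is clear; poison otherwise.
    cmoveq  %r8, %rax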

/// Compute the register class for the unfolded load.
///
/// FIXME: This should probably live in X86InstrInfo, potentially by adding
/// a way to unfold into a newly created vreg rather than requiring a register
/// input.
static const TargetRegisterClass *
getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
                           unsigned Opcode) {
  unsigned Index;
  unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
      Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
  const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
  return TII.getRegClass(MCID, Index, &TII.getRegisterInfo(), MF);
}

void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
    MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF)
    for (auto MII = MBB.instr_begin(), MIE = MBB.instr_end(); MII != MIE;) {
      // Grab a reference and increment the iterator so we can remove this
      // instruction if needed without disturbing the iteration.
      MachineInstr &MI = *MII++;

      // Must either be a call or a branch.
      if (!MI.isCall() && !MI.isBranch())
        continue;
      // We only care about loading variants of these instructions.
      if (!MI.mayLoad())
        continue;

      switch (MI.getOpcode()) {
      default: {
        LLVM_DEBUG(
            dbgs() << "ERROR: Found an unexpected loading branch or call "
                      "instruction:\n";
            MI.dump(); dbgs() << "\n");
        report_fatal_error("Unexpected loading branch or call!");
      }

      case X86::FARCALL16m:
      case X86::FARCALL32m:
      case X86::FARCALL64:
      case X86::FARJMP16m:
      case X86::FARJMP32m:
      case X86::FARJMP64:
        // We cannot mitigate far jumps or calls, but we also don't expect them
        // to be vulnerable to Spectre v1.2 style attacks.
        continue;

      case X86::CALL16m:
      case X86::CALL16m_NT:
      case X86::CALL32m:
      case X86::CALL32m_NT:
      case X86::CALL64m:
      case X86::CALL64m_NT:
      case X86::JMP16m:
      case X86::JMP16m_NT:
      case X86::JMP32m:
      case X86::JMP32m_NT:
      case X86::JMP64m:
      case X86::JMP64m_NT:
      case X86::TAILJMPm64:
      case X86::TAILJMPm64_REX:
      case X86::TAILJMPm:
      case X86::TCRETURNmi64:
      case X86::TCRETURNmi: {
        // Use the generic unfold logic now that we know we're dealing with
        // expected instructions.
        // FIXME: We don't have test coverage for all of these!
        auto *UnfoldedRC = getRegClassForUnfoldedLoad(MF, *TII, MI.getOpcode());
        if (!UnfoldedRC) {
          LLVM_DEBUG(dbgs()
                         << "ERROR: Unable to unfold load from instruction:\n";
                     MI.dump(); dbgs() << "\n");
          report_fatal_error("Unable to unfold load!");
        }
        unsigned Reg = MRI->createVirtualRegister(UnfoldedRC);
        SmallVector<MachineInstr *, 2> NewMIs;
        // If we were able to compute an unfolded reg class, any failure here
        // is just a programming error so just assert.
        bool Unfolded =
            TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
                                     /*UnfoldStore*/ false, NewMIs);
        (void)Unfolded;
        assert(Unfolded &&
               "Computed unfolded register class but failed to unfold");
        // Now stitch the new instructions into place and erase the old one.
        for (auto *NewMI : NewMIs)
          MBB.insert(MI.getIterator(), NewMI);
        MI.eraseFromParent();
        LLVM_DEBUG({
          dbgs() << "Unfolded load successfully into:\n";
          for (auto *NewMI : NewMIs) {
            NewMI->dump();
            dbgs() << "\n";
          }
        });
        continue;
      }
      }
      llvm_unreachable("Escaped switch with default!");
    }
}
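
The effect of the unfolding above is to split a memory-indirect call or jump into an explicit load feeding a register-indirect transfer, so the loaded target becomes visible to the later hardening steps. Schematically, with hypothetical operands:

  # Before: the load is fused into the jump and cannot be hardened directly.
  jmpq *8(%rdi)
  # After unfolding: the load is a separate, hardenable instruction.
  movq 8(%rdi), %rax
  jmpq *%rax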
940
941/// Trace the predicate state through indirect branches, instrumenting them to
942/// poison the state if a target is reached that does not match the expected
943/// target.
944///
945/// This is designed to mitigate Spectre variant 1 attacks where an indirect
946/// branch is trained to predict a particular target and then mispredicts that
947/// target in a way that can leak data. Despite using an indirect branch, this
948/// is really a variant 1 style attack: it does not steer execution to an
949/// arbitrary or attacker controlled address, and it does not require any
950/// special code executing next to the victim. This attack can also be mitigated
951/// through retpolines, but those require either replacing indirect branches
952/// with conditional direct branches or lowering them through a device that
953/// blocks speculation. This mitigation can replace these retpoline-style
954/// mitigations for jump tables and other indirect branches within a function
955/// when variant 2 isn't a risk while allowing limited speculation. Indirect
956/// calls, however, cannot be mitigated through this technique without changing
957/// the ABI in a fundamental way.
958SmallVector<MachineInstr *, 16>
959X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
960 MachineFunction &MF) {
961 // We use the SSAUpdater to insert PHI nodes for the target addresses of
962 // indirect branches. We don't actually need the full power of the SSA updater
963 // in this particular case as we always have immediately available values, but
964 // this avoids us having to re-implement the PHI construction logic.
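// A minimal sketch of that updater pattern (illustrative names only):
//
//   MachineSSAUpdater SSA(MF);
//   SSA.Initialize(SomeVirtReg);
//   SSA.AddAvailableValue(PredMBB, RegAvailableInPred);
//   unsigned MergedReg = SSA.GetValueInMiddleOfBlock(TargetMBB);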
965 MachineSSAUpdater TargetAddrSSA(MF);
966 TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass));
967
968 // Track which blocks were terminated with an indirect branch.
969 SmallPtrSet<MachineBasicBlock *, 4> IndirectTerminatedMBBs;
970
971 // We need to know what blocks end up reached via indirect branches. We
972 // expect this to be a subset of those whose address is taken and so track it
973 // directly via the CFG.
974 SmallPtrSet<MachineBasicBlock *, 4> IndirectTargetMBBs;
975
976 // Walk all the blocks which end in an indirect branch and make the
977 // target address available.
978 for (MachineBasicBlock &MBB : MF) {
979 // Find the last terminator.
980 auto MII = MBB.instr_rbegin();
981 while (MII != MBB.instr_rend() && MII->isDebugInstr())
982 ++MII;
983 if (MII == MBB.instr_rend())
984 continue;
985 MachineInstr &TI = *MII;
986 if (!TI.isTerminator() || !TI.isBranch())
987 // No terminator or non-branch terminator.
988 continue;
989
990 unsigned TargetReg;
991
992 switch (TI.getOpcode()) {
993 default:
994 // Direct branch or conditional branch (leading to fallthrough).
995 continue;
996
997 case X86::FARJMP16m:
998 case X86::FARJMP32m:
999 case X86::FARJMP64:
1000 // We cannot mitigate far jumps or calls, but we also don't expect them
1001 // to be vulnerable to Spectre v1.2 or v2 (self-trained) style attacks.
1002 continue;
1003
1004 case X86::JMP16m:
1005 case X86::JMP16m_NT:
1006 case X86::JMP32m:
1007 case X86::JMP32m_NT:
1008 case X86::JMP64m:
1009 case X86::JMP64m_NT:
1010 // Mostly as documentation.
1011 report_fatal_error("Memory operand jumps should have been unfolded!");
1012
1013 case X86::JMP16r:
1014 report_fatal_error(
1015 "Support for 16-bit indirect branches is not implemented.");
1016 case X86::JMP32r:
1017 report_fatal_error(
1018 "Support for 32-bit indirect branches is not implemented.");
1019
1020 case X86::JMP64r:
1021 TargetReg = TI.getOperand(0).getReg();
1022 }
1023
1024 // We have definitely found an indirect branch. Verify that there are no
1025 // preceding conditional branches as we don't yet support that.
1026 if (llvm::any_of(MBB.terminators(), [&](MachineInstr &OtherTI) {
1027 return !OtherTI.isDebugInstr() && &OtherTI != &TI;
1028 })) {
1029 LLVM_DEBUG({
1030 dbgs() << "ERROR: Found other terminators in a block with an indirect "
1031 "branch! This is not yet supported! Terminator sequence:\n";
1032 for (MachineInstr &MI : MBB.terminators()) {
1033 MI.dump();
1034 dbgs() << '\n';
1035 }
1036 });
1037 report_fatal_error("Unimplemented terminator sequence!");
1038 }
1039
1040 // Make the target register an available value for this block.
1041 TargetAddrSSA.AddAvailableValue(&MBB, TargetReg);
1042 IndirectTerminatedMBBs.insert(&MBB);
1043
1044 // Add all the successors to our target candidates.
1045 for (MachineBasicBlock *Succ : MBB.successors())
1046 IndirectTargetMBBs.insert(Succ);
1047 }
1048
1049 // Keep track of the cmov instructions we insert so we can return them.
1050 SmallVector<MachineInstr *, 16> CMovs;
1051
1052 // If we didn't find any indirect branches with targets, nothing to do here.
1053 if (IndirectTargetMBBs.empty())
1054 return CMovs;
1055
1056 // We found indirect branches and targets that need to be instrumented to
1057 // harden loads within them. Walk the blocks of the function (to get a stable
1058 // ordering) and instrument each target of an indirect branch.
1059 for (MachineBasicBlock &MBB : MF) {
1060 // Skip the blocks that aren't candidate targets.
1061 if (!IndirectTargetMBBs.count(&MBB))
1062 continue;
1063
1064 // We don't expect EH pads to ever be reached via an indirect branch. If
1065 // this is desired for some reason, we could simply skip them here rather
1066 // than asserting.
1067 assert(!MBB.isEHPad() &&
1068 "Unexpected EH pad as target of an indirect branch!");
1069
1070 // We should never end up threading EFLAGS into a block to harden
1071 // conditional jumps as there would be an additional successor via the
1072 // indirect branch. As a consequence, all such edges would be split before
1073 // reaching here, and the inserted block will handle the EFLAGS-based
1074 // hardening.
1075 assert(!MBB.isLiveIn(X86::EFLAGS) &&
1076 "Cannot check within a block that already has live-in EFLAGS!");
1077
1078 // We can't handle having non-indirect edges into this block unless this is
1079 // the only successor and we can synthesize the necessary target address.
1080 for (MachineBasicBlock *Pred : MBB.predecessors()) {
1081 // If we've already handled this by extracting the target directly,
1082 // nothing to do.
1083 if (IndirectTerminatedMBBs.count(Pred))
1084 continue;
1085
1086 // Otherwise, we have to be the only successor. We generally expect this
1087 // to be true as conditional branches should have had a critical edge
1088 // split already. We don't however need to worry about EH pad successors
1089 // as they'll happily ignore the target and their hardening strategy is
1090 // resilient to all ways in which they could be reached speculatively.
1091 if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
1092 return Succ->isEHPad() || Succ == &MBB;
1093 })) {
1094 LLVM_DEBUG({
1095 dbgs() << "ERROR: Found conditional entry to target of indirect "
1096 "branch!\n";
1097 Pred->dump();
1098 MBB.dump();
1099 });
1100 report_fatal_error("Cannot harden a conditional entry to a target of "
1101 "an indirect branch!");
1102 }
1103
1104 // Now we need to compute the address of this block and install it as a
1105 // synthetic target in the predecessor. We do this at the bottom of the
1106 // predecessor.
1107 auto InsertPt = Pred->getFirstTerminator();
1108 unsigned TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1109 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
1110 !Subtarget->isPositionIndependent()) {
1111 // Directly materialize it into an immediate.
1112 auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(),
1113 TII->get(X86::MOV64ri32), TargetReg)
1114 .addMBB(&MBB);
1115 ++NumInstsInserted;
1116 (void)AddrI;
1117 LLVM_DEBUG(dbgs() << "  Inserting mov: "; AddrI->dump();
1118 dbgs() << "\n");
1119 } else {
1120 auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::LEA64r),
1121 TargetReg)
1122 .addReg(/*Base*/ X86::RIP)
1123 .addImm(/*Scale*/ 1)
1124 .addReg(/*Index*/ 0)
1125 .addMBB(&MBB)
1126 .addReg(/*Segment*/ 0);
1127 ++NumInstsInserted;
1128 (void)AddrI;
1129 LLVM_DEBUG(dbgs() << "  Inserting lea: "; AddrI->dump();
1130 dbgs() << "\n");
1131 }
1132 // And make this available.
1133 TargetAddrSSA.AddAvailableValue(Pred, TargetReg);
1134 }
1135
1136 // Materialize the needed SSA value of the target. Note that we need the
1137 // middle of the block as this block might at the bottom have an indirect
1138 // branch back to itself. We can do this here because at this point, every
1139 // predecessor of this block has an available value. This is basically just
1140 // automating the construction of a PHI node for this target.
1141 unsigned TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
1142
1143 // Insert a comparison of the incoming target register with this block's
1144 // address.
1145 auto InsertPt = MBB.SkipPHIsLabelsAndDebug(MBB.begin());
1146 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
1147 !Subtarget->isPositionIndependent()) {
1148 // Check directly against a relocated immediate when we can.
1149 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64ri32))
1150 .addReg(TargetReg, RegState::Kill)
1151 .addMBB(&MBB);
1152 ++NumInstsInserted;
1153 (void)CheckI;
1154 LLVM_DEBUG(dbgs() << "  Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1155 } else {
1156 // Otherwise compute the address into a register first.
1157 unsigned AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1158 auto AddrI =
1159 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg)
1160 .addReg(/*Base*/ X86::RIP)
1161 .addImm(/*Scale*/ 1)
1162 .addReg(/*Index*/ 0)
1163 .addMBB(&MBB)
1164 .addReg(/*Segment*/ 0);
1165 ++NumInstsInserted;
1166 (void)AddrI;
1167 LLVM_DEBUG(dbgs() << "  Inserting lea: "; AddrI->dump(); dbgs() << "\n");
1168 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
1169 .addReg(TargetReg, RegState::Kill)
1170 .addReg(AddrReg, RegState::Kill);
1171 ++NumInstsInserted;
1172 (void)CheckI;
1173 LLVM_DEBUG(dbgs() << "  Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1174 }
1175
1176 // Now cmov over the predicate if the comparison wasn't equal.
1177 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
1178 auto CMovOp = X86::getCMovFromCond(X86::COND_NE, PredStateSizeInBytes);
1179 unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
1180 auto CMovI =
1181 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
1182 .addReg(PS->InitialReg)
1183 .addReg(PS->PoisonReg);
1184 CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
1185 ++NumInstsInserted;
1186 LLVM_DEBUG(dbgs() << "  Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
1187 CMovs.push_back(&*CMovI);
1188
1189 // And put the new value into the available values for SSA form of our
1190 // predicate state.
1191 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
1192 }
1193
1194 // Return all the newly inserted cmov instructions of the predicate state.
1195 return CMovs;
1196}
1197
1198/// Returns true if the instruction has no behavior (specified or otherwise)
1199/// that is based on the value of any of its register operands
1200///
1201/// A classical example of something that is inherently not data invariant is an
1202/// indirect jump -- the destination is loaded into icache based on the bits set
1203/// in the jump destination register.
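///
/// A contrasting sketch (illustrative, not an exhaustive rule): `addq %rbx,
/// %rax` takes the same time for any value in %rbx and so is data invariant,
/// while `divq %rbx` has value-dependent latency and `jmpq *%rax` steers
/// instruction fetch by the value in %rax, so neither of those qualifies.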
1204///
1205/// FIXME: This should become part of our instruction tables.
1206static bool isDataInvariant(MachineInstr &MI) {
1207 switch (MI.getOpcode()) {
1208 default:
1209 // By default, assume that the instruction is not data invariant.
1210 return false;
1211
1212 // Some target-independent operations that trivially lower to data-invariant
1213 // instructions.
1214 case TargetOpcode::COPY:
1215 case TargetOpcode::INSERT_SUBREG:
1216 case TargetOpcode::SUBREG_TO_REG:
1217 return true;
1218
1219 // On x86 it is believed that imul is constant time w.r.t. its input data.
1220 // However, these instructions set flags and are perhaps the most surprisingly
1221 // constant-time operations, so we call them out here separately.
1222 case X86::IMUL16rr:
1223 case X86::IMUL16rri8:
1224 case X86::IMUL16rri:
1225 case X86::IMUL32rr:
1226 case X86::IMUL32rri8:
1227 case X86::IMUL32rri:
1228 case X86::IMUL64rr:
1229 case X86::IMUL64rri32:
1230 case X86::IMUL64rri8:
1231
1232 // Bit scanning and counting instructions that are somewhat surprisingly
1233 // constant time as they scan across bits and do other fairly complex
1234 // operations like popcnt, but are believed to be constant time on x86.
1235 // However, these set flags.
1236 case X86::BSF16rr:
1237 case X86::BSF32rr:
1238 case X86::BSF64rr:
1239 case X86::BSR16rr:
1240 case X86::BSR32rr:
1241 case X86::BSR64rr:
1242 case X86::LZCNT16rr:
1243 case X86::LZCNT32rr:
1244 case X86::LZCNT64rr:
1245 case X86::POPCNT16rr:
1246 case X86::POPCNT32rr:
1247 case X86::POPCNT64rr:
1248 case X86::TZCNT16rr:
1249 case X86::TZCNT32rr:
1250 case X86::TZCNT64rr:
1251
1252 // Bit manipulation instructions are effectively combinations of basic
1253 // arithmetic ops, and should still execute in constant time. These also
1254 // set flags.
1255 case X86::BLCFILL32rr:
1256 case X86::BLCFILL64rr:
1257 case X86::BLCI32rr:
1258 case X86::BLCI64rr:
1259 case X86::BLCIC32rr:
1260 case X86::BLCIC64rr:
1261 case X86::BLCMSK32rr:
1262 case X86::BLCMSK64rr:
1263 case X86::BLCS32rr:
1264 case X86::BLCS64rr:
1265 case X86::BLSFILL32rr:
1266 case X86::BLSFILL64rr:
1267 case X86::BLSI32rr:
1268 case X86::BLSI64rr:
1269 case X86::BLSIC32rr:
1270 case X86::BLSIC64rr:
1271 case X86::BLSMSK32rr:
1272 case X86::BLSMSK64rr:
1273 case X86::BLSR32rr:
1274 case X86::BLSR64rr:
1275 case X86::TZMSK32rr:
1276 case X86::TZMSK64rr:
1277
1278 // Bit extracting and clearing instructions should execute in constant time,
1279 // and set flags.
1280 case X86::BEXTR32rr:
1281 case X86::BEXTR64rr:
1282 case X86::BEXTRI32ri:
1283 case X86::BEXTRI64ri:
1284 case X86::BZHI32rr:
1285 case X86::BZHI64rr:
1286
1287 // Shift and rotate.
1288 case X86::ROL8r1: case X86::ROL16r1: case X86::ROL32r1: case X86::ROL64r1:
1289 case X86::ROL8rCL: case X86::ROL16rCL: case X86::ROL32rCL: case X86::ROL64rCL:
1290 case X86::ROL8ri: case X86::ROL16ri: case X86::ROL32ri: case X86::ROL64ri:
1291 case X86::ROR8r1: case X86::ROR16r1: case X86::ROR32r1: case X86::ROR64r1:
1292 case X86::ROR8rCL: case X86::ROR16rCL: case X86::ROR32rCL: case X86::ROR64rCL:
1293 case X86::ROR8ri: case X86::ROR16ri: case X86::ROR32ri: case X86::ROR64ri:
1294 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1: case X86::SAR64r1:
1295 case X86::SAR8rCL: case X86::SAR16rCL: case X86::SAR32rCL: case X86::SAR64rCL:
1296 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
1297 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1: case X86::SHL64r1:
1298 case X86::SHL8rCL: case X86::SHL16rCL: case X86::SHL32rCL: case X86::SHL64rCL:
1299 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri:
1300 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1: case X86::SHR64r1:
1301 case X86::SHR8rCL: case X86::SHR16rCL: case X86::SHR32rCL: case X86::SHR64rCL:
1302 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
1303 case X86::SHLD16rrCL: case X86::SHLD32rrCL: case X86::SHLD64rrCL:
1304 case X86::SHLD16rri8: case X86::SHLD32rri8: case X86::SHLD64rri8:
1305 case X86::SHRD16rrCL: case X86::SHRD32rrCL: case X86::SHRD64rrCL:
1306 case X86::SHRD16rri8: case X86::SHRD32rri8: case X86::SHRD64rri8:
1307
1308 // Basic arithmetic is constant time on the input but does set flags.
1309 case X86::ADC8rr: case X86::ADC8ri:
1310 case X86::ADC16rr: case X86::ADC16ri: case X86::ADC16ri8:
1311 case X86::ADC32rr: case X86::ADC32ri: case X86::ADC32ri8:
1312 case X86::ADC64rr: case X86::ADC64ri8: case X86::ADC64ri32:
1313 case X86::ADD8rr: case X86::ADD8ri:
1314 case X86::ADD16rr: case X86::ADD16ri: case X86::ADD16ri8:
1315 case X86::ADD32rr: case X86::ADD32ri: case X86::ADD32ri8:
1316 case X86::ADD64rr: case X86::ADD64ri8: case X86::ADD64ri32:
1317 case X86::AND8rr: case X86::AND8ri:
1318 case X86::AND16rr: case X86::AND16ri: case X86::AND16ri8:
1319 case X86::AND32rr: case X86::AND32ri: case X86::AND32ri8:
1320 case X86::AND64rr: case X86::AND64ri8: case X86::AND64ri32:
1321 case X86::OR8rr: case X86::OR8ri:
1322 case X86::OR16rr: case X86::OR16ri: case X86::OR16ri8:
1323 case X86::OR32rr: case X86::OR32ri: case X86::OR32ri8:
1324 case X86::OR64rr: case X86::OR64ri8: case X86::OR64ri32:
1325 case X86::SBB8rr: case X86::SBB8ri:
1326 case X86::SBB16rr: case X86::SBB16ri: case X86::SBB16ri8:
1327 case X86::SBB32rr: case X86::SBB32ri: case X86::SBB32ri8:
1328 case X86::SBB64rr: case X86::SBB64ri8: case X86::SBB64ri32:
1329 case X86::SUB8rr: case X86::SUB8ri:
1330 case X86::SUB16rr: case X86::SUB16ri: case X86::SUB16ri8:
1331 case X86::SUB32rr: case X86::SUB32ri: case X86::SUB32ri8:
1332 case X86::SUB64rr: case X86::SUB64ri8: case X86::SUB64ri32:
1333 case X86::XOR8rr: case X86::XOR8ri:
1334 case X86::XOR16rr: case X86::XOR16ri: case X86::XOR16ri8:
1335 case X86::XOR32rr: case X86::XOR32ri: case X86::XOR32ri8:
1336 case X86::XOR64rr: case X86::XOR64ri8: case X86::XOR64ri32:
1337 // Arithmetic with just 32-bit and 64-bit variants and no immediates.
1338 case X86::ADCX32rr: case X86::ADCX64rr:
1339 case X86::ADOX32rr: case X86::ADOX64rr:
1340 case X86::ANDN32rr: case X86::ANDN64rr:
1341 // Unary arithmetic operations.
1342 case X86::DEC8r: case X86::DEC16r: case X86::DEC32r: case X86::DEC64r:
1343 case X86::INC8r: case X86::INC16r: case X86::INC32r: case X86::INC64r:
1344 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
1345 // Check whether the EFLAGS implicit-def is dead. We assume that this will
1346 // always find the implicit-def because this code should only be reached
1347 // for instructions that do in fact implicitly def this.
1348 if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
1349 // If we would clobber EFLAGS that are used, just bail for now.
1350 LLVM_DEBUG(dbgs() << "    Unable to harden post-load due to EFLAGS: ";
1351 MI.dump(); dbgs() << "\n");
1352 return false;
1353 }
1354
1355 // Otherwise, fallthrough to handle these the same as instructions that
1356 // don't set EFLAGS.
1357 LLVM_FALLTHROUGH;
1358
1359 // Unlike other arithmetic, NOT doesn't set EFLAGS.
1360 case X86::NOT8r: case X86::NOT16r: case X86::NOT32r: case X86::NOT64r:
1361
1362 // Various move instructions used to zero or sign extend things. Note that we
1363 // intentionally don't support the _NOREX variants as we can't handle that
1364 // register constraint anyways.
1365 case X86::MOVSX16rr8:
1366 case X86::MOVSX32rr8: case X86::MOVSX32rr16:
1367 case X86::MOVSX64rr8: case X86::MOVSX64rr16: case X86::MOVSX64rr32:
1368 case X86::MOVZX16rr8:
1369 case X86::MOVZX32rr8: case X86::MOVZX32rr16:
1370 case X86::MOVZX64rr8: case X86::MOVZX64rr16:
1371 case X86::MOV32rr:
1372
1373 // Arithmetic instructions that are both constant time and don't set flags.
1374 case X86::RORX32ri:
1375 case X86::RORX64ri:
1376 case X86::SARX32rr:
1377 case X86::SARX64rr:
1378 case X86::SHLX32rr:
1379 case X86::SHLX64rr:
1380 case X86::SHRX32rr:
1381 case X86::SHRX64rr:
1382
1383 // LEA doesn't actually access memory, and its arithmetic is constant time.
1384 case X86::LEA16r:
1385 case X86::LEA32r:
1386 case X86::LEA64_32r:
1387 case X86::LEA64r:
1388 return true;
1389 }
1390}
1391
1392/// Returns true if the instruction has no behavior (specified or otherwise)
1393/// that is based on the value loaded from memory or the value of any
1394/// non-address register operands.
1395///
1396/// For example, if the latency of the instruction is dependent on the
1397/// particular bits set in any of the registers *or* any of the bits loaded from
1398/// memory.
1399///
1400/// A classical example of something that is inherently not data invariant is an
1401/// indirect jump -- the destination is loaded into icache based on the bits set
1402/// in the jump destination register.
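///
/// As a rough example: `addq (%rdi), %rax` merely folds the loaded bits into
/// register and flag state, so its result can still be hardened after the
/// fact; by contrast, a load whose result feeds the address of a second load
/// (e.g. `movq (%rcx), %rax` with %rcx produced by an earlier unchecked load)
/// uses loaded data as an address and leaks it through the cache before any
/// later check could apply.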
1403///
1404/// FIXME: This should become part of our instruction tables.
1405static bool isDataInvariantLoad(MachineInstr &MI) {
1406 switch (MI.getOpcode()) {
1407 default:
1408 // By default, assume that the load will immediately leak.
1409 return false;
1410
1411 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
1412 // However, these instructions set flags and are perhaps the most surprisingly
1413 // constant-time operations, so we call them out here separately.
1414 case X86::IMUL16rm:
1415 case X86::IMUL16rmi8:
1416 case X86::IMUL16rmi:
1417 case X86::IMUL32rm:
1418 case X86::IMUL32rmi8:
1419 case X86::IMUL32rmi:
1420 case X86::IMUL64rm:
1421 case X86::IMUL64rmi32:
1422 case X86::IMUL64rmi8:
1423
1424 // Bit scanning and counting instructions that are somewhat surprisingly
1425 // constant time as they scan across bits and do other fairly complex
1426 // operations like popcnt, but are believed to be constant time on x86.
1427 // However, these set flags.
1428 case X86::BSF16rm:
1429 case X86::BSF32rm:
1430 case X86::BSF64rm:
1431 case X86::BSR16rm:
1432 case X86::BSR32rm:
1433 case X86::BSR64rm:
1434 case X86::LZCNT16rm:
1435 case X86::LZCNT32rm:
1436 case X86::LZCNT64rm:
1437 case X86::POPCNT16rm:
1438 case X86::POPCNT32rm:
1439 case X86::POPCNT64rm:
1440 case X86::TZCNT16rm:
1441 case X86::TZCNT32rm:
1442 case X86::TZCNT64rm:
1443
1444 // Bit manipulation instructions are effectively combinations of basic
1445 // arithmetic ops, and should still execute in constant time. These also
1446 // set flags.
1447 case X86::BLCFILL32rm:
1448 case X86::BLCFILL64rm:
1449 case X86::BLCI32rm:
1450 case X86::BLCI64rm:
1451 case X86::BLCIC32rm:
1452 case X86::BLCIC64rm:
1453 case X86::BLCMSK32rm:
1454 case X86::BLCMSK64rm:
1455 case X86::BLCS32rm:
1456 case X86::BLCS64rm:
1457 case X86::BLSFILL32rm:
1458 case X86::BLSFILL64rm:
1459 case X86::BLSI32rm:
1460 case X86::BLSI64rm:
1461 case X86::BLSIC32rm:
1462 case X86::BLSIC64rm:
1463 case X86::BLSMSK32rm:
1464 case X86::BLSMSK64rm:
1465 case X86::BLSR32rm:
1466 case X86::BLSR64rm:
1467 case X86::TZMSK32rm:
1468 case X86::TZMSK64rm:
1469
1470 // Bit extracting and clearing instructions should execute in constant time,
1471 // and set flags.
1472 case X86::BEXTR32rm:
1473 case X86::BEXTR64rm:
1474 case X86::BEXTRI32mi:
1475 case X86::BEXTRI64mi:
1476 case X86::BZHI32rm:
1477 case X86::BZHI64rm:
1478
1479 // Basic arithmetic is constant time on the input but does set flags.
1480 case X86::ADC8rm:
1481 case X86::ADC16rm:
1482 case X86::ADC32rm:
1483 case X86::ADC64rm:
1484 case X86::ADCX32rm:
1485 case X86::ADCX64rm:
1486 case X86::ADD8rm:
1487 case X86::ADD16rm:
1488 case X86::ADD32rm:
1489 case X86::ADD64rm:
1490 case X86::ADOX32rm:
1491 case X86::ADOX64rm:
1492 case X86::AND8rm:
1493 case X86::AND16rm:
1494 case X86::AND32rm:
1495 case X86::AND64rm:
1496 case X86::ANDN32rm:
1497 case X86::ANDN64rm:
1498 case X86::OR8rm:
1499 case X86::OR16rm:
1500 case X86::OR32rm:
1501 case X86::OR64rm:
1502 case X86::SBB8rm:
1503 case X86::SBB16rm:
1504 case X86::SBB32rm:
1505 case X86::SBB64rm:
1506 case X86::SUB8rm:
1507 case X86::SUB16rm:
1508 case X86::SUB32rm:
1509 case X86::SUB64rm:
1510 case X86::XOR8rm:
1511 case X86::XOR16rm:
1512 case X86::XOR32rm:
1513 case X86::XOR64rm:
1514 // Check whether the EFLAGS implicit-def is dead. We assume that this will
1515 // always find the implicit-def because this code should only be reached
1516 // for instructions that do in fact implicitly def this.
1517 if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
1518 // If we would clobber EFLAGS that are used, just bail for now.
1519 LLVM_DEBUG(dbgs() << "    Unable to harden post-load due to EFLAGS: ";
1520 MI.dump(); dbgs() << "\n");
1521 return false;
1522 }
1523
1524 // Otherwise, fallthrough to handle these the same as instructions that
1525 // don't set EFLAGS.
1526 LLVM_FALLTHROUGH;
1527
1528 // Integer multiply w/o affecting flags is still believed to be constant
1529 // time on x86. Called out separately as this is among the most surprising
1530 // instructions to exhibit that behavior.
1531 case X86::MULX32rm:
1532 case X86::MULX64rm:
1533
1534 // Arithmetic instructions that are both constant time and don't set flags.
1535 case X86::RORX32mi:
1536 case X86::RORX64mi:
1537 case X86::SARX32rm:
1538 case X86::SARX64rm:
1539 case X86::SHLX32rm:
1540 case X86::SHLX64rm:
1541 case X86::SHRX32rm:
1542 case X86::SHRX64rm:
1543
1544 // Conversions are believed to be constant time and don't set flags.
1545 case X86::CVTTSD2SI64rm: case X86::VCVTTSD2SI64rm: case X86::VCVTTSD2SI64Zrm:
1546 case X86::CVTTSD2SIrm: case X86::VCVTTSD2SIrm: case X86::VCVTTSD2SIZrm:
1547 case X86::CVTTSS2SI64rm: case X86::VCVTTSS2SI64rm: case X86::VCVTTSS2SI64Zrm:
1548 case X86::CVTTSS2SIrm: case X86::VCVTTSS2SIrm: case X86::VCVTTSS2SIZrm:
1549 case X86::CVTSI2SDrm: case X86::VCVTSI2SDrm: case X86::VCVTSI2SDZrm:
1550 case X86::CVTSI2SSrm: case X86::VCVTSI2SSrm: case X86::VCVTSI2SSZrm:
1551 case X86::CVTSI642SDrm: case X86::VCVTSI642SDrm: case X86::VCVTSI642SDZrm:
1552 case X86::CVTSI642SSrm: case X86::VCVTSI642SSrm: case X86::VCVTSI642SSZrm:
1553 case X86::CVTSS2SDrm: case X86::VCVTSS2SDrm: case X86::VCVTSS2SDZrm:
1554 case X86::CVTSD2SSrm: case X86::VCVTSD2SSrm: case X86::VCVTSD2SSZrm:
1555 // AVX512 added unsigned integer conversions.
1556 case X86::VCVTTSD2USI64Zrm:
1557 case X86::VCVTTSD2USIZrm:
1558 case X86::VCVTTSS2USI64Zrm:
1559 case X86::VCVTTSS2USIZrm:
1560 case X86::VCVTUSI2SDZrm:
1561 case X86::VCVTUSI642SDZrm:
1562 case X86::VCVTUSI2SSZrm:
1563 case X86::VCVTUSI642SSZrm:
1564
1565 // Loads to register don't set flags.
1566 case X86::MOV8rm:
1567 case X86::MOV8rm_NOREX:
1568 case X86::MOV16rm:
1569 case X86::MOV32rm:
1570 case X86::MOV64rm:
1571 case X86::MOVSX16rm8:
1572 case X86::MOVSX32rm16:
1573 case X86::MOVSX32rm8:
1574 case X86::MOVSX32rm8_NOREX:
1575 case X86::MOVSX64rm16:
1576 case X86::MOVSX64rm32:
1577 case X86::MOVSX64rm8:
1578 case X86::MOVZX16rm8:
1579 case X86::MOVZX32rm16:
1580 case X86::MOVZX32rm8:
1581 case X86::MOVZX32rm8_NOREX:
1582 case X86::MOVZX64rm16:
1583 case X86::MOVZX64rm8:
1584 return true;
1585 }
1586}
1587
1588static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1589 const TargetRegisterInfo &TRI) {
1590 // Check if EFLAGS are alive by seeing if there is a def of them or they
1591 // live-in, and then seeing if that def is in turn used.
1592 for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
1593 if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
1594 // If the def is dead, then EFLAGS is not live.
1595 if (DefOp->isDead())
1596 return false;
1597
1598 // Otherwise we've def'ed it, and it is live.
1599 return true;
1600 }
1601 // While at this instruction, also check if we use and kill EFLAGS
1602 // which means it isn't live.
1603 if (MI.killsRegister(X86::EFLAGS, &TRI))
1604 return false;
1605 }
1606
1607 // If we didn't find anything conclusive (neither definitely alive nor
1608 // definitely dead), return whether it lives into the block.
1609 return MBB.isLiveIn(X86::EFLAGS);
1610}
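// For illustration, isEFLAGSLive answers questions of this shape: in the
// sequence `cmpq %rbx, %rax; <I>; jne target`, EFLAGS is live at <I> because
// the def from the cmp is still consumed by the jne, so hardening code
// inserted at <I> must either avoid clobbering the flags or save and restore
// them around the inserted instructions.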
1611
1612/// Trace the predicate state through each of the blocks in the function,
1613/// hardening everything necessary along the way.
1614///
1615/// We call this routine once the initial predicate state has been established
1616/// for each basic block in the function in the SSA updater. This routine traces
1617/// it through the instructions within each basic block, and for non-returning
1618/// blocks informs the SSA updater about the final state that lives out of the
1619/// block. Along the way, it hardens any vulnerable instruction using the
1620/// currently valid predicate state. We have to do these two things together
1621/// because the SSA updater only works across blocks. Within a block, we track
1622/// the current predicate state directly and update it as it changes.
1623///
1624/// This operates in two passes over each block. First, we analyze the loads in
1625/// the block to determine which strategy will be used to harden them: hardening
1626/// the address or hardening the loaded value when loaded into a register
1627/// amenable to hardening. We have to process these first because the two
1628/// strategies may interact -- later hardening may change what strategy we wish
1629 /// to use. We will also analyze data dependencies between loads and avoid
1630 /// hardening those loads that are data dependent on a load with a hardened
1631 /// address, and we skip hardening loads already behind an LFENCE, as that is
1632 /// sufficient to harden them against misspeculation.
1633///
1634/// Second, we actively trace the predicate state through the block, applying
1635/// the hardening steps we determined necessary in the first pass as we go.
1636///
1637/// These two passes are applied to each basic block. We operate one block at a
1638/// time to simplify reasoning about reachability and sequencing.
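///
/// As a hedged sketch of the two strategies on a load `movq (%rsi), %rax`
/// with the predicate state in %ps (names are placeholders):
///
///   orq %ps, %rsi      # address hardening: poison the pointer before the
///   movq (%rsi), %rax  #   load so misspeculated loads cannot read secrets
///
///   movq (%rsi), %rax  # post-load hardening: let the load execute, then
///   orq %ps, %rax      #   poison the loaded value before it can leak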
1639void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
1640 MachineFunction &MF) {
1641 SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
1642 SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
1643
1644 SmallSet<unsigned, 16> HardenedAddrRegs;
1645
1646 SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;
1647
1648 // Track the set of load-dependent registers through the basic block. Because
1649 // the values of these registers have an existing data dependency on a loaded
1650 // value which we would have checked, we can omit any checks on them.
1651 SparseBitVector<> LoadDepRegs;
1652
1653 for (MachineBasicBlock &MBB : MF) {
1654 // The first pass over the block: collect all the loads which can have their
1655 // loaded value hardened and all the loads that instead need their address
1656 // hardened. During this walk we propagate load dependence for address
1657 // hardened loads and also look for LFENCE to stop hardening wherever
1658 // possible. When deciding whether or not to harden the loaded value,
1659 // we check to see if any registers used in the address will have been
1660 // hardened at this point and if so, harden any remaining address registers
1661 // as that often successfully re-uses hardened addresses and minimizes
1662 // instructions.
1663 //
1664 // FIXME: We should consider an aggressive mode where we keep as many loads
1665 // value-hardened as possible, even when some address register hardening would
1666 // be free (due to reuse).
1667 //
1668 // Note that we only need this pass if we are actually hardening loads.
1669 if (HardenLoads)
1. Assuming the condition is false
2. Taking false branch
1670 for (MachineInstr &MI : MBB) {
1671 // We naively assume that all def'ed registers of an instruction have
1672 // a data dependency on all of their operands.
1673 // FIXME: Do a more careful analysis of x86 to build a conservative
1674 // model here.
1675 if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
1676 return Op.isReg() && LoadDepRegs.test(Op.getReg());
1677 }))
1678 for (MachineOperand &Def : MI.defs())
1679 if (Def.isReg())
1680 LoadDepRegs.set(Def.getReg());
1681
1682 // Both Intel and AMD are guiding that they will change the semantics of
1683 // LFENCE to be a speculation barrier, so if we see an LFENCE, there is
1684 // no more need to guard things in this block.
1685 if (MI.getOpcode() == X86::LFENCE)
1686 break;
1687
1688 // If this instruction cannot load, nothing to do.
1689 if (!MI.mayLoad())
1690 continue;
1691
1692 // Some instructions which "load" are trivially safe or unimportant.
1693 if (MI.getOpcode() == X86::MFENCE)
1694 continue;
1695
1696 // Extract the memory operand information about this instruction.
1697 // FIXME: This doesn't handle loading pseudo instructions which we often
1698 // could handle with similarly generic logic. We probably need to add an
1699 // MI-layer routine similar to the MC-layer one we use here which maps
1700 // pseudos much like this maps real instructions.
1701 const MCInstrDesc &Desc = MI.getDesc();
1702 int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
1703 if (MemRefBeginIdx < 0) {
1704 LLVM_DEBUG(dbgs()
1705 << "WARNING: unable to harden loading instruction: ";
1706 MI.dump());
1707 continue;
1708 }
1709
1710 MemRefBeginIdx += X86II::getOperandBias(Desc);
1711
1712 MachineOperand &BaseMO =
1713 MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1714 MachineOperand &IndexMO =
1715 MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1716
1717 // If we have at least one (non-frame-index, non-RIP) register operand,
1718 // and neither operand is load-dependent, we need to check the load.
1719 unsigned BaseReg = 0, IndexReg = 0;
1720 if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
1721 BaseMO.getReg() != X86::NoRegister)
1722 BaseReg = BaseMO.getReg();
1723 if (IndexMO.getReg() != X86::NoRegister)
1724 IndexReg = IndexMO.getReg();
1725
1726 if (!BaseReg && !IndexReg)
1727 // No register operands!
1728 continue;
1729
1730 // If any register operand is dependent, this load is dependent and we
1731 // needn't check it.
1732 // FIXME: Is this true in the case where we are hardening loads after
1733 // they complete? Unclear, need to investigate.
1734 if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
1735 (IndexReg && LoadDepRegs.test(IndexReg)))
1736 continue;
1737
1738 // If post-load hardening is enabled, this load is compatible with
1739 // post-load hardening, and we aren't already going to harden one of the
1740 // address registers, queue it up to be hardened post-load. Notably,
1741 // even once hardened this won't introduce a useful dependency that
1742 // could prune out subsequent loads.
1743 if (EnablePostLoadHardening && isDataInvariantLoad(MI) &&
1744 MI.getDesc().getNumDefs() == 1 && MI.getOperand(0).isReg() &&
1745 canHardenRegister(MI.getOperand(0).getReg()) &&
1746 !HardenedAddrRegs.count(BaseReg) &&
1747 !HardenedAddrRegs.count(IndexReg)) {
1748 HardenPostLoad.insert(&MI);
1749 HardenedAddrRegs.insert(MI.getOperand(0).getReg());
1750 continue;
1751 }
1752
1753 // Record this instruction for address hardening and record its register
1754 // operands as being address-hardened.
1755 HardenLoadAddr.insert(&MI);
1756 if (BaseReg)
1757 HardenedAddrRegs.insert(BaseReg);
1758 if (IndexReg)
1759 HardenedAddrRegs.insert(IndexReg);
1760
1761 for (MachineOperand &Def : MI.defs())
1762 if (Def.isReg())
1763 LoadDepRegs.set(Def.getReg());
1764 }
1765
1766 // Now re-walk the instructions in the basic block, and apply whichever
1767 // hardening strategy we have elected. Note that we do this in a second
1768 // pass specifically so that we have the complete set of instructions for
1769 // which we will do post-load hardening and can defer it in certain
1770 // circumstances.
1771 for (MachineInstr &MI : MBB) {
1772 if (HardenLoads) {
3. Assuming the condition is true
4. Taking true branch
1773 // We cannot both require hardening the def of a load and its address.
1774 assert(!(HardenLoadAddr.count(&MI) && HardenPostLoad.count(&MI)) &&
1775 "Requested to harden both the address and def of a load!");
1776
1777 // Check if this is a load whose address needs to be hardened.
1778 if (HardenLoadAddr.erase(&MI)) {
5. Assuming the condition is false
6. Taking false branch
1779 const MCInstrDesc &Desc = MI.getDesc();
1780 int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
1781 assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");
1782
1783 MemRefBeginIdx += X86II::getOperandBias(Desc);
1784
1785 MachineOperand &BaseMO =
1786 MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1787 MachineOperand &IndexMO =
1788 MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1789 hardenLoadAddr(MI, BaseMO, IndexMO, AddrRegToHardenedReg);
1790 continue;
1791 }
1792
1793 // Test if this instruction is one of our post load instructions (and
1794 // remove it from the set if so).
1795 if (HardenPostLoad.erase(&MI)) {
7. Assuming the condition is false
8. Taking false branch
1796 assert(!MI.isCall() && "Must not try to post-load harden a call!");
1797
1798 // If this is a data-invariant load, we want to try and sink any
1799 // hardening as far as possible.
1800 if (isDataInvariantLoad(MI)) {
1801 // Sink the instruction we'll need to harden as far as we can down
1802 // the graph.
1803 MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);
1804
1805 // If we managed to sink this instruction, update everything so we
1806 // harden that instruction when we reach it in the instruction
1807 // sequence.
1808 if (SunkMI != &MI) {
1809 // If in sinking there was no instruction needing to be hardened,
1810 // we're done.
1811 if (!SunkMI)
1812 continue;
1813
1814 // Otherwise, add this to the set of defs we harden.
1815 HardenPostLoad.insert(SunkMI);
1816 continue;
1817 }
1818 }
1819
1820 unsigned HardenedReg = hardenPostLoad(MI);
1821
1822 // Mark the resulting hardened register as such so we don't re-harden.
1823 AddrRegToHardenedReg[HardenedReg] = HardenedReg;
1824
1825 continue;
1826 }
1827
1828 // Check for an indirect call or branch that may need its input hardened
1829 // even if we couldn't find the specific load used, or were able to
1830 // avoid hardening it for some reason. Note that here we cannot break
1831 // out afterward as we may still need to handle any call aspect of this
1832 // instruction.
1833 if ((MI.isCall() || MI.isBranch()) && HardenIndirectCallsAndJumps)
9. Assuming the condition is true
10. Assuming the condition is true
11. Taking true branch
1834 hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
12. Calling 'X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr'
1835 }
1836
1837 // After we finish hardening loads we handle interprocedural hardening if
1838 // enabled and relevant for this instruction.
1839 if (!HardenInterprocedurally)
1840 continue;
1841 if (!MI.isCall() && !MI.isReturn())
1842 continue;
1843
1844 // If this is a direct return (IE, not a tail call) just directly harden
1845 // it.
1846 if (MI.isReturn() && !MI.isCall()) {
1847 hardenReturnInstr(MI);
1848 continue;
1849 }
1850
1851 // Otherwise we have a call. We need to handle transferring the predicate
1852 // state into a call and recovering it after the call returns (unless this
1853 // is a tail call).
1854 assert(MI.isCall() && "Should only reach here for calls!");
1855 tracePredStateThroughCall(MI);
1856 }
1857
1858 HardenPostLoad.clear();
1859 HardenLoadAddr.clear();
1860 HardenedAddrRegs.clear();
1861 AddrRegToHardenedReg.clear();
1862
1863 // Currently, we only track data-dependent loads within a basic block.
1864 // FIXME: We should see if this is necessary or if we could be more
1865 // aggressive here without opening up attack avenues.
1866 LoadDepRegs.clear();
1867 }
1868}
1869
1870/// Save EFLAGS into the returned GPR. This can in turn be restored with
1871/// `restoreEFLAGS`.
1872///
1873/// Note that LLVM can only lower very simple patterns of saved and restored
1874/// EFLAGS registers. The restore should always be within the same basic block
1875/// as the save so that no PHI nodes are inserted.
1876unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
1877 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1878 DebugLoc Loc) {
1879 // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
1880 // what instruction selection does.
1881 unsigned Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
1882 // We directly copy the FLAGS register and rely on later lowering to clean
1883 // this up into the appropriate setCC instructions.
1884 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
1885 ++NumInstsInserted;
1886 return Reg;
1887}
1888
1889/// Restore EFLAGS from the provided GPR. This should be produced by
1890/// `saveEFLAGS`.
1891///
1892/// This must be done within the same basic block as the save in order to
1893/// reliably lower.
1894void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
1895 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
1896 unsigned Reg) {
1897 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
1898 ++NumInstsInserted;
1899}
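// A minimal usage sketch under that same-block constraint (illustrative):
//
//   unsigned FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
//   // ... emit flag-clobbering hardening instructions here ...
//   restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);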
1900
1901/// Takes the current predicate state (in a register) and merges it into the
1902/// stack pointer. The state is essentially a single bit, but we merge this in
1903/// a way that won't form non-canonical pointers and also will be preserved
1904/// across normal stack adjustments.
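///
/// Concretely, this emits the MachineInstr equivalent of (a sketch):
///
///   shlq $47, %state  # move the all-zeros/all-ones state into the high bits
///   orq %state, %rsp  # fold it into RSP without making it non-canonical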
1905void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
1906 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
1907 unsigned PredStateReg) {
1908 unsigned TmpReg = MRI->createVirtualRegister(PS->RC);
1909 // FIXME: This hard codes a shift distance based on the number of bits needed
1910 // to stay canonical on 64-bit. We should compute this somehow and support
1911 // 32-bit as part of that.
1912 auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
1913 .addReg(PredStateReg, RegState::Kill)
1914 .addImm(47);
1915 ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1916 ++NumInstsInserted;
1917 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
1918 .addReg(X86::RSP)
1919 .addReg(TmpReg, RegState::Kill);
1920 OrI->addRegisterDead(X86::EFLAGS, TRI);
1921 ++NumInstsInserted;
1922}
1923
1924/// Extracts the predicate state stored in the high bits of the stack pointer.
1925unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
1926 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1927 DebugLoc Loc) {
1928 unsigned PredStateReg = MRI->createVirtualRegister(PS->RC);
1929 unsigned TmpReg = MRI->createVirtualRegister(PS->RC);
1930
1931 // We know that the stack pointer will have any preserved predicate state in
1932 // its high bit. We just want to smear this across the other bits. Turns out,
1933 // this is exactly what an arithmetic right shift does.
1934 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
1935 .addReg(X86::RSP);
1936 auto ShiftI =
1937 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
1938 .addReg(TmpReg, RegState::Kill)
1939 .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
1940 ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1941 ++NumInstsInserted;
1942
1943 return PredStateReg;
1944}
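// Worked example (illustrative): if the preserved high bit of RSP is 0, the
// 63-bit arithmetic shift above produces 0; if it was poisoned to 1, the
// shift smears it into all-ones, recreating the 0 / -1 predicate state.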
1945
1946void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
1947 MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
1948 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
1949 MachineBasicBlock &MBB = *MI.getParent();
1950 DebugLoc Loc = MI.getDebugLoc();
1951
1952 // Check if EFLAGS are alive by seeing if there is a def of them or they
1953 // live-in, and then seeing if that def is in turn used.
1954 bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);
1955
1956 SmallVector<MachineOperand *, 2> HardenOpRegs;
1957
1958 if (BaseMO.isFI()) {
1959 // A frame index is never a dynamically controllable load, so only
1960 // harden it if we're covering fixed address loads as well.
1961 LLVM_DEBUG(
1962 dbgs() << "  Skipping hardening base of explicit stack frame load: ";
1963 MI.dump(); dbgs() << "\n");
1964 } else if (BaseMO.getReg() == X86::RIP ||
1965 BaseMO.getReg() == X86::NoRegister) {
1966 // For both RIP-relative addressed loads or absolute loads, we cannot
1967 // meaningfully harden them because the address being loaded has no
1968 // dynamic component.
1969 //
1970 // FIXME: When using a segment base (like TLS does) we end up with the
1971 // dynamic address being the base plus -1 because we can't mutate the
1972 // segment register here. This allows the signed 32-bit offset to point at
1973 // valid segment-relative addresses and load them successfully.
1974 LLVM_DEBUG(
1975 dbgs() << "  Cannot harden base of "
1976 << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
1977 << " address in a load!");
1978 } else {
1979 assert(BaseMO.isReg() &&
1980 "Only allowed to have a frame index or register base.");
1981 HardenOpRegs.push_back(&BaseMO);
1982 }
1983
1984 if (IndexMO.getReg() != X86::NoRegister &&
1985 (HardenOpRegs.empty() ||
1986 HardenOpRegs.front()->getReg() != IndexMO.getReg()))
1987 HardenOpRegs.push_back(&IndexMO);
1988
1989 assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
1990 "Should have exactly one or two registers to harden!");
1991 assert((HardenOpRegs.size() == 1 ||
1992 HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
1993 "Should not have two of the same registers!");
1994
1995 // Remove any registers that have already been checked.
1996 llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
1997 // See if this operand's register has already been checked.
1998 auto It = AddrRegToHardenedReg.find(Op->getReg());
1999 if (It == AddrRegToHardenedReg.end())
2000 // Not checked, so retain this one.
2001 return false;
2002
2003 // Otherwise, we can directly update this operand and remove it.
2004 Op->setReg(It->second);
2005 return true;
2006 });
2007 // If there are none left, we're done.
2008 if (HardenOpRegs.empty())
2009 return;
2010
2011 // Compute the current predicate state.
2012 unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2013
2014 auto InsertPt = MI.getIterator();
2015
2016 // If EFLAGS are live and we don't have access to instructions that avoid
2017 // clobbering EFLAGS, we need to save and restore them. This in turn makes
2018 // EFLAGS no longer live.
2019 unsigned FlagsReg = 0;
2020 if (EFLAGSLive && !Subtarget->hasBMI2()) {
2021 EFLAGSLive = false;
2022 FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
2023 }
2024
2025 for (MachineOperand *Op : HardenOpRegs) {
2026 unsigned OpReg = Op->getReg();
2027 auto *OpRC = MRI->getRegClass(OpReg);
2028 unsigned TmpReg = MRI->createVirtualRegister(OpRC);
2029
2030 // If this is a vector register, we'll need somewhat custom logic to handle
2031 // hardening it.
2032 if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
2033 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
2034 assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
2035 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
2036
2037 // Move our state into a vector register.
2038 // FIXME: We could skip this at the cost of longer encodings with AVX-512
2039 // but that doesn't seem likely to be worth it.
2040 unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
2041 auto MovI =
2042 BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
2043 .addReg(StateReg);
2044 (void)MovI;
2045 ++NumInstsInserted;
2046 LLVM_DEBUG(dbgs() << "  Inserting mov: "; MovI->dump(); dbgs() << "\n");
2047
2048 // Broadcast it across the vector register.
2049 unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
2050 auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
2051 TII->get(Is128Bit ? X86::VPBROADCASTQrr
2052 : X86::VPBROADCASTQYrr),
2053 VBStateReg)
2054 .addReg(VStateReg);
2055 (void)BroadcastI;
2056 ++NumInstsInserted;
2057 LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
2058 dbgs() << "\n");
2059
2060 // Merge our potential poison state into the value with a vector or.
2061 auto OrI =
2062 BuildMI(MBB, InsertPt, Loc,
2063 TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
2064 .addReg(VBStateReg)
2065 .addReg(OpReg);
2066 (void)OrI;
2067 ++NumInstsInserted;
2068 LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
2069 } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
2070 OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
2071 OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
2072 assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
2073 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
2074 bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
2075 if (Is128Bit || Is256Bit)
2076 assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
2077
2078 // Broadcast our state into a vector register.
2079 unsigned VStateReg = MRI->createVirtualRegister(OpRC);
2080 unsigned BroadcastOp =
2081 Is128Bit ? X86::VPBROADCASTQrZ128r
2082 : Is256Bit ? X86::VPBROADCASTQrZ256r : X86::VPBROADCASTQrZr;
2083 auto BroadcastI =
2084 BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
2085 .addReg(StateReg);
2086 (void)BroadcastI;
2087 ++NumInstsInserted;
2088 LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
2089 dbgs() << "\n");
2090
2091 // Merge our potential poison state into the value with a vector or.
2092 unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
2093 : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
2094 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
2095 .addReg(VStateReg)
2096 .addReg(OpReg);
2097 (void)OrI;
2098 ++NumInstsInserted;
2099 LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
2100 } else {
2101 // FIXME: Need to support GR32 here for 32-bit code.
2102 assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
2103 "Not a supported register class for address hardening!");
2104
2105 if (!EFLAGSLive) {
2106 // Merge our potential poison state into the value with an or.
2107 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
2108 .addReg(StateReg)
2109 .addReg(OpReg);
2110 OrI->addRegisterDead(X86::EFLAGS, TRI);
2111 ++NumInstsInserted;
2112 LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
2113 } else {
2114 // We need to avoid touching EFLAGS so shift out all but the least
2115 // significant bit using the instruction that doesn't update flags.
2116 auto ShiftI =
2117 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
2118 .addReg(OpReg)
2119 .addReg(StateReg);
2120 (void)ShiftI;
2121 ++NumInstsInserted;
2122 LLVM_DEBUG(dbgs() << "  Inserting shrx: "; ShiftI->dump();
2123 dbgs() << "\n");
2124 }
2125 }
2126
2127 // Record this register as checked and update the operand.
2128 assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
2129 "Should not have checked this register yet!");
2130 AddrRegToHardenedReg[Op->getReg()] = TmpReg;
2131 Op->setReg(TmpReg);
2132 ++NumAddrRegsHardened;
2133 }
2134
2135 // And restore the flags if needed.
2136 if (FlagsReg)
2137 restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
2138}
2139
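Note on the SHRX path above (lines 2114-2123): it relies on the predicate state being all-zeros on the architectural path and all-ones under misspeculation. A minimal C++ model of that trick, purely illustrative and not part of the pass (names invented):

  #include <cassert>
  #include <cstdint>

  // Models TmpReg = SHRX64rr(OpReg, StateReg): a flags-free shift whose count
  // is the low 6 bits of the state. State == 0 leaves the address untouched;
  // State == ~0ULL shifts right by 63, destroying every address bit except
  // the original bit 63 (zero for canonical user pointers).
  uint64_t modelShrxHardening(uint64_t Addr, uint64_t State) {
    return Addr >> (State & 63);
  }

  int main() {
    uint64_t Addr = 0x00007fffdeadbeefULL; // canonical user-space pointer
    assert(modelShrxHardening(Addr, 0) == Addr);  // architectural execution
    assert(modelShrxHardening(Addr, ~0ULL) == 0); // misspeculation
    return 0;
  }
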
2140 MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
2141 MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
2142 assert(isDataInvariantLoad(InitialMI) &&
2143 "Cannot get here with a non-invariant load!");
2144
2145 // See if we can sink hardening the loaded value.
2146 auto SinkCheckToSingleUse =
2147 [&](MachineInstr &MI) -> Optional<MachineInstr *> {
2148 unsigned DefReg = MI.getOperand(0).getReg();
2149
2150 // We need to find a single use to which we can sink the check. We can
2151 // primarily do this because many uses may already end up checked on their
2152 // own.
2153 MachineInstr *SingleUseMI = nullptr;
2154 for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
2155 // If we're already going to harden this use, it is data invariant and
2156 // within our block.
2157 if (HardenedInstrs.count(&UseMI)) {
2158 if (!isDataInvariantLoad(UseMI)) {
2159 // If we've already decided to harden a non-load, we must have sunk
2160 // some other post-load hardened instruction to it and it must itself
2161 // be data-invariant.
2162 assert(isDataInvariant(UseMI) &&
2163 "Data variant instruction being hardened!");
2164 continue;
2165 }
2166
2167 // Otherwise, this is a load, and the load component can't be data
2168 // invariant, so check how this register is being used.
2169 const MCInstrDesc &Desc = UseMI.getDesc();
2170 int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
2171 assert(MemRefBeginIdx >= 0 &&
2172 "Should always have mem references here!");
2173 MemRefBeginIdx += X86II::getOperandBias(Desc);
2174
2175 MachineOperand &BaseMO =
2176 UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
2177 MachineOperand &IndexMO =
2178 UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
2179 if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
2180 (IndexMO.isReg() && IndexMO.getReg() == DefReg))
2181 // The load uses the register as part of its address, making it not
2182 // invariant.
2183 return {};
2184
2185 continue;
2186 }
2187
2188 if (SingleUseMI)
2189 // We already have a single use, this would make two. Bail.
2190 return {};
2191
2192 // If this single use isn't data invariant, isn't in this block, or has
2193 // interfering EFLAGS, we can't sink the hardening to it.
2194 if (!isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent())
2195 return {};
2196
2197 // If this instruction defines multiple registers, bail, as we won't harden
2198 // all of them.
2199 if (UseMI.getDesc().getNumDefs() > 1)
2200 return {};
2201
2202 // If this register isn't a virtual register, we can't sanely walk its
2203 // uses, so just bail. Also check that its register class is one of the
2204 // ones we can harden.
2205 unsigned UseDefReg = UseMI.getOperand(0).getReg();
2206 if (!TRI->isVirtualRegister(UseDefReg) ||
2207 !canHardenRegister(UseDefReg))
2208 return {};
2209
2210 SingleUseMI = &UseMI;
2211 }
2212
2213 // If SingleUseMI is still null, there is no use that needs its own
2214 // checking. Otherwise, it is the single use that needs checking.
2215 return {SingleUseMI};
2216 };
2217
2218 MachineInstr *MI = &InitialMI;
2219 while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
2220 // Update which MI we're checking now.
2221 MI = *SingleUse;
2222 if (!MI)
2223 break;
2224 }
2225
2226 return MI;
2227}
2228
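To make the sinking walk concrete, the shape of chain it advances along looks like this in illustrative pseudo-MIR (virtual register numbers invented):

  %v1 = MOV64rm ...        ; data-invariant load we were asked to harden
  %v2 = ADD64rr %v1, %v0   ; the sole use: data invariant, same block
  ; ... every later use reads %v2 ...

Rather than hardening %v1's def directly, the loop advances MI to the ADD so that a single hardening `or` over %v2 covers the loaded bits after they have flowed through the invariant computation.
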
2229 bool X86SpeculativeLoadHardeningPass::canHardenRegister(unsigned Reg) {
2230 auto *RC = MRI->getRegClass(Reg);
2231 int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
2232 if (RegBytes > 8)
20. Assuming 'RegBytes' is <= 8
21. Taking false branch
2233 // We don't support post-load hardening of vectors.
2234 return false;
2235
2236 // If this register class is explicitly constrained to a class that doesn't
2237 // require REX prefix, we may not be able to satisfy that constraint when
2238 // emitting the hardening instructions, so bail out here.
2239 // FIXME: This seems like a pretty lame hack. The way this comes up is when we
2240 // end up both with a NOREX and REX-only register as operands to the hardening
2241 // instructions. It would be better to fix that code to handle this situation
2242 // rather than hack around it in this way.
2243 const TargetRegisterClass *NOREXRegClasses[] = {
2244 &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
2245 &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
2246 if (RC == NOREXRegClasses[Log2_32(RegBytes)])
22. The right operand of '==' is a garbage value due to array index out of bounds
2247 return false;
2248
2249 const TargetRegisterClass *GPRRegClasses[] = {
2250 &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
2251 &X86::GR64RegClass};
2252 return RC->hasSuperClassEq(GPRRegClasses[Log2_32(RegBytes)]);
2253}
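On the warning at line 2246 above: the only fact the analyzer can establish about RegBytes is the RegBytes <= 8 bound from the earlier branch; nothing in scope proves it is a nonzero power of two. If a register class ever reported fewer than 8 bits, RegBytes would be 0, and Log2_32(0) does not yield a valid index into the four-element NOREXRegClasses array — that is the garbage subscript being reported. One possible defensive guard, shown only as a sketch under the assumption that 1, 2, 4, and 8 bytes are the supported sizes (this is an illustration, not the upstream fix):

  int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
  // Reject vectors and any size that is not exactly 1, 2, 4, or 8 bytes
  // before using it to derive a table index.
  if (RegBytes != 1 && RegBytes != 2 && RegBytes != 4 && RegBytes != 8)
    return false;
  unsigned Idx = Log2_32(RegBytes); // Provably in [0, 3] now.
  if (RC == NOREXRegClasses[Idx])
    return false;
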
2254
2255 /// Harden a value in a register.
2256 ///
2257 /// This is the low-level logic to fully harden a value sitting in a register
2258 /// against leaking during speculative execution.
2259 ///
2260 /// Unlike hardening an address that is used by a load, this routine is required
2261 /// to hide *all* incoming bits in the register.
2262 ///
2263 /// `Reg` must be a virtual register. Currently, it is required to be a GPR no
2264 /// larger than the predicate state register. FIXME: We should support vector
2265 /// registers here by broadcasting the predicate state.
2266 ///
2267 /// The new, hardened virtual register is returned. It will have the same
2268 /// register class as `Reg`.
2269 unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
2270 unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
2271 DebugLoc Loc) {
2272 assert(canHardenRegister(Reg) && "Cannot harden this register!");
19. Within the expansion of the macro 'assert': Calling 'X86SpeculativeLoadHardeningPass::canHardenRegister'
2273 assert(TRI->isVirtualRegister(Reg) && "Cannot harden a physical register!");
2274
2275 auto *RC = MRI->getRegClass(Reg);
2276 int Bytes = TRI->getRegSizeInBits(*RC) / 8;
2277
2278 unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2279
2280 // FIXME: Need to teach this about 32-bit mode.
2281 if (Bytes != 8) {
2282 unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
2283 unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
2284 unsigned NarrowStateReg = MRI->createVirtualRegister(RC);
2285 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
2286 .addReg(StateReg, 0, SubRegImm);
2287 StateReg = NarrowStateReg;
2288 }
2289
2290 unsigned FlagsReg = 0;
2291 if (isEFLAGSLive(MBB, InsertPt, *TRI))
2292 FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
2293
2294 unsigned NewReg = MRI->createVirtualRegister(RC);
2295 unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
2296 unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
2297 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
2298 .addReg(StateReg)
2299 .addReg(Reg);
2300 OrI->addRegisterDead(X86::EFLAGS, TRI);
2301 ++NumInstsInserted;
2302 LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
2303
2304 if (FlagsReg)
2305 restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
2306
2307 return NewReg;
2308}
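The function above reduces to NewReg = StateReg | Reg, plus a sub-register copy to match widths. A small self-contained C++ model of why a plain `or` is enough (illustrative only):

  #include <cassert>
  #include <cstdint>

  // The poison value is all-ones, so or-ing the state over a value is the
  // identity architecturally and forces all-ones under misspeculation,
  // hiding every incoming bit as the doc comment requires.
  uint64_t modelOrHardening(uint64_t Value, uint64_t State) {
    return Value | State;
  }

  int main() {
    assert(modelOrHardening(0x1234, 0) == 0x1234);    // normal path
    assert(modelOrHardening(0x1234, ~0ULL) == ~0ULL); // misspeculation
    return 0;
  }
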
2309
2310 /// Harden a load by hardening the loaded value in the defined register.
2311 ///
2312 /// We can harden a non-leaking load into a register without touching the
2313 /// address by just hiding all of the loaded bits during misspeculation. We use
2314 /// an `or` instruction to do this because we set up our poison value as all
2315 /// ones. The goal is just that the loaded bits not be exposed to speculative
2316 /// execution, and coercing them all to one is sufficient for that.
2317 ///
2318 /// Returns the newly hardened register.
2319 unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
2320 MachineBasicBlock &MBB = *MI.getParent();
2321 DebugLoc Loc = MI.getDebugLoc();
2322
2323 auto &DefOp = MI.getOperand(0);
2324 unsigned OldDefReg = DefOp.getReg();
2325 auto *DefRC = MRI->getRegClass(OldDefReg);
2326
2327 // Because we want to completely replace the uses of this def'ed value with
2328 // the hardened value, create a dedicated new register that will only be used
2329 // to communicate the unhardened value to the hardening.
2330 unsigned UnhardenedReg = MRI->createVirtualRegister(DefRC);
2331 DefOp.setReg(UnhardenedReg);
2332
2333 // Now harden this register's value, getting a hardened reg that is safe to
2334 // use. Note that we insert the instructions to compute this *after* the
2335 // defining instruction, not before it.
2336 unsigned HardenedReg = hardenValueInRegister(
2337 UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);
2338
2339 // Finally, replace the old register (which now only has the uses of the
2340 // original def) with the hardened register.
2341 MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);
2342
2343 ++NumPostLoadRegsHardened;
2344 return HardenedReg;
2345}
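The register dance above is easiest to see as a before/after sketch in pseudo-MIR (register numbers invented):

  ; before:                      ; after:
  %v1 = MOV64rm <addr>           %v2 = MOV64rm <addr>       ; UnhardenedReg
  ; all uses read %v1            %v3 = OR64rr %state, %v2   ; HardenedReg
                                 ; all uses now read %v3

The dedicated %v2 exists only to feed the `or`; replaceRegWith then points every original use of %v1 at the hardened %v3.
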
2346
2347 /// Harden a return instruction.
2348 ///
2349 /// Returns implicitly perform a load which we need to harden. Without hardening
2350 /// this load, an attacker may speculatively write over the return address to
2351 /// steer speculation of the return to an attacker-controlled address. This is
2352 /// called Spectre v1.1 or Bounds Check Bypass Store (BCBS) and is described in
2353 /// this paper:
2354 /// https://people.csail.mit.edu/vlk/spectre11.pdf
2355 ///
2356 /// We can harden this by introducing an LFENCE that will delay any load of the
2357 /// return address until prior instructions have retired (and thus are not being
2358 /// speculated), or we can harden the address used by the implicit load: the
2359 /// stack pointer.
2360 ///
2361 /// If we are not using an LFENCE, hardening the stack pointer has an additional
2362 /// benefit: it allows us to pass the predicate state accumulated in this
2363 /// function back to the caller. In the absence of a BCBS attack on the return,
2364 /// the caller will typically be resumed and speculatively executed due to the
2365 /// Return Stack Buffer (RSB) prediction, which is very accurate and has a high
2366 /// priority. It is possible that some code from the caller will be executed
2367 /// speculatively even during a BCBS-attacked return until the steering takes
2368 /// effect. Whenever this happens, the caller can recover the (poisoned)
2369 /// predicate state from the stack pointer and continue to harden loads.
2370 void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {
2371 MachineBasicBlock &MBB = *MI.getParent();
2372 DebugLoc Loc = MI.getDebugLoc();
2373 auto InsertPt = MI.getIterator();
2374
2375 if (FenceCallAndRet)
2376 // No need to fence here as we'll fence at the return site itself. That
2377 // handles more cases than we can handle here.
2378 return;
2379
2380 // Take our predicate state, shift it to the high 17 bits (so that we keep
2381 // pointers canonical) and merge it into RSP. This will allow the caller to
2382 // extract it when we return (speculatively).
2383 mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
2384}
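The merge mentioned in the comment boils down to a shift-and-or into %rsp, with the matching extraction done in the caller by an arithmetic shift. Roughly, and hedged as a sketch rather than a verbatim quote of mergePredStateIntoSP/extractPredStateFromSP:

  # merge, in the returning function:
  shlq $47, %state     # move the 0/-1 state into the high, non-address bits
  orq  %state, %rsp    # state 0 leaves %rsp intact; -1 smears the top 17 bits

  # extract, in the caller after the call:
  movq %rsp, %state
  sarq $63, %state     # arithmetic shift rebuilds a full-width 0 or -1 mask
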
2385
2386 /// Trace the predicate state through a call.
2387 ///
2388 /// There are several layers of this needed to handle the full complexity of
2389 /// calls.
2390 ///
2391 /// First, we need to send the predicate state into the called function. We do
2392 /// this by merging it into the high bits of the stack pointer.
2393 ///
2394 /// For tail calls, this is all we need to do.
2395 ///
2396 /// For calls where we might return and resume the control flow, we need to
2397 /// extract the predicate state from the high bits of the stack pointer after
2398 /// control returns from the called function.
2399 ///
2400 /// We also need to verify that we intended to return to this location in the
2401 /// code. An attacker might arrange for the processor to mispredict the return
2402 /// to this valid but incorrect return address in the program rather than the
2403 /// correct one. See the paper on this attack, called "ret2spec" by the
2404 /// researchers, here:
2405 /// https://christian-rossow.de/publications/ret2spec-ccs2018.pdf
2406 ///
2407 /// The way we verify that we returned to the correct location is by preserving
2408 /// the expected return address across the call. One technique involves taking
2409 /// advantage of the red-zone to load the return address from `8(%rsp)` where it
2410 /// was left by the RET instruction when it popped `%rsp`. Alternatively, we can
2411 /// directly save the address into a register that will be preserved across the
2412 /// call. We compare this intended return address against the address
2413 /// immediately following the call (the observed return address). If these
2414 /// mismatch, we have detected misspeculation and can poison our predicate
2415 /// state.
2416 void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
2417 MachineInstr &MI) {
2418 MachineBasicBlock &MBB = *MI.getParent();
2419 MachineFunction &MF = *MBB.getParent();
2420 auto InsertPt = MI.getIterator();
2421 DebugLoc Loc = MI.getDebugLoc();
2422
2423 if (FenceCallAndRet) {
2424 if (MI.isReturn())
2425 // Tail call, we don't return to this function.
2426 // FIXME: We should also handle noreturn calls.
2427 return;
2428
2429 // We don't need to fence before the call because the function should fence
2430 // in its entry. However, we do need to fence after the call returns.
2431 // Fencing before the return doesn't correctly handle cases where the return
2432 // itself is mispredicted.
2433 BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
2434 ++NumInstsInserted;
2435 ++NumLFENCEsInserted;
2436 return;
2437 }
2438
2439 // First, we transfer the predicate state into the called function by merging
2440 // it into the stack pointer. This will kill the current def of the state.
2441 unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2442 mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
2443
2444 // If this call is also a return, it is a tail call and we don't need anything
2445 // else to handle it, so just return. Also, if there are no further
2446 // instructions and no successors, this call does not return, so we can also
2447 // bail.
2448 if (MI.isReturn() || (std::next(InsertPt) == MBB.end() && MBB.succ_empty()))
2449 return;
2450
2451 // Create a symbol to track the return address and attach it to the call
2452 // machine instruction. We will lower extra symbols attached to call
2453 // instructions as labels immediately following the call.
2454 MCSymbol *RetSymbol =
2455 MF.getContext().createTempSymbol("slh_ret_addr",
2456 /*AlwaysAddSuffix*/ true);
2457 MI.setPostInstrSymbol(MF, RetSymbol);
2458
2459 const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
2460 unsigned ExpectedRetAddrReg = 0;
2461
2462 // If we have no red zones or if the function returns twice (possibly without
2463 // using the `ret` instruction) like setjmp, we need to save the expected
2464 // return address prior to the call.
2465 if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone) ||
2466 MF.exposesReturnsTwice()) {
2467 // If we don't have red zones, we need to compute the expected return
2468 // address prior to the call and store it in a register that lives across
2469 // the call.
2470 //
2471 // In some ways, this is doubly satisfying as a mitigation because it will
2472 // also successfully detect stack smashing bugs in some cases (typically,
2473 // when a callee-saved register is used and the callee doesn't push it onto
2474 // the stack). But that isn't our primary goal, so we only use it as
2475 // a fallback.
2476 //
2477 // FIXME: It isn't clear that this is reliable in the face of
2478 // rematerialization in the register allocator. We somehow need to force
2479 // that to not occur for this particular instruction, and instead to spill
2480 // or otherwise preserve the value computed *prior* to the call.
2481 //
2482 // FIXME: It is even less clear why MachineCSE can't just fold this when we
2483 // end up having to use identical instructions both before and after the
2484 // call to feed the comparison.
2485 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2486 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
2487 !Subtarget->isPositionIndependent()) {
2488 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
2489 .addSym(RetSymbol);
2490 } else {
2491 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)
2492 .addReg(/*Base*/ X86::RIP)
2493 .addImm(/*Scale*/ 1)
2494 .addReg(/*Index*/ 0)
2495 .addSym(RetSymbol)
2496 .addReg(/*Segment*/ 0);
2497 }
2498 }
2499
2500 // Step past the call to handle when it returns.
2501 ++InsertPt;
2502
2503 // If we didn't pre-compute the expected return address into a register, then
2504 // red zones are enabled and the return address is still available on the
2505 // stack immediately after the call. As the very first instruction after the
2506 // call, we load it into a register.
2507 if (!ExpectedRetAddrReg) {
2508 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2509 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)
2510 .addReg(/*Base*/ X86::RSP)
2511 .addImm(/*Scale*/ 1)
2512 .addReg(/*Index*/ 0)
2513 .addImm(/*Displacement*/ -8) // The return address was popped, so it now
2514 // sits 8 bytes below the stack pointer.
2515 .addReg(/*Segment*/ 0);
2516 }
2517
2518 // Now we extract the callee's predicate state from the stack pointer.
2519 unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
2520
2521 // Test the expected return address against our actual address. If we can
2522 // form this basic block's address as an immediate, this is easy. Otherwise
2523 // we compute it.
2524 if (MF.getTarget().getCodeModel() == CodeModel::Small &&
2525 !Subtarget->isPositionIndependent()) {
2526 // FIXME: Could we fold this with the load? It would require careful EFLAGS
2527 // management.
2528 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))
2529 .addReg(ExpectedRetAddrReg, RegState::Kill)
2530 .addSym(RetSymbol);
2531 } else {
2532 unsigned ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
2533 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
2534 .addReg(/*Base*/ X86::RIP)
2535 .addImm(/*Scale*/ 1)
2536 .addReg(/*Index*/ 0)
2537 .addSym(RetSymbol)
2538 .addReg(/*Segment*/ 0);
2539 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))
2540 .addReg(ExpectedRetAddrReg, RegState::Kill)
2541 .addReg(ActualRetAddrReg, RegState::Kill);
2542 }
2543
2544 // Now conditionally update the predicate state we just extracted if we ended
2545 // up at a different return address than expected.
2546 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
2547 auto CMovOp = X86::getCMovFromCond(X86::COND_NE, PredStateSizeInBytes);
2548
2549 unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
2550 auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
2551 .addReg(NewStateReg, RegState::Kill)
2552 .addReg(PS->PoisonReg);
2553 CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
2554 ++NumInstsInserted;
2555 LLVM_DEBUG(dbgs() << "  Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
2556
2557 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
2558}
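Assembled end to end, the sequence this function emits around a call looks roughly like the following, assuming the small code model and an available red zone; the registers and the exact label spelling are illustrative:

  callq callee                  # RetSymbol attached as a post-instruction label
  .Lslh_ret_addr0:
  movq -8(%rsp), %rcx           # expected return address, left in the red zone
  movq %rsp, %rax               # extractPredStateFromSP
  sarq $63, %rax
  cmpq $.Lslh_ret_addr0, %rcx   # did we really return here?
  cmovneq %rdx, %rax            # if not, adopt the all-ones poison value
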
2559
2560 /// An attacker may speculatively store over a value that is then speculatively
2561 /// loaded and used as the target of an indirect call or jump instruction. This
2562 /// is called Spectre v1.2 or Bounds Check Bypass Store (BCBS) and is described
2563 /// in this paper:
2564 /// https://people.csail.mit.edu/vlk/spectre11.pdf
2565 ///
2566 /// When this happens, the speculative execution of the call or jump will end up
2567 /// being steered to this attacker-controlled address. While most such loads
2568 /// will be adequately hardened already, we want to ensure that they are
2569 /// definitively treated as needing post-load hardening. While address hardening
2570 /// is sufficient to prevent secret data from leaking to the attacker, it may
2571 /// not be sufficient to prevent an attacker from steering speculative
2572 /// execution. We forcibly unfolded all relevant loads above, so we will always
2573 /// have an opportunity to post-load harden here; we just need to scan for cases
2574 /// not already flagged and add them.
2575 void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
2576 MachineInstr &MI,
2577 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
2578 switch (MI.getOpcode()) {
13. Control jumps to the 'default' case at line 2589
2579 case X86::FARCALL16m:
2580 case X86::FARCALL32m:
2581 case X86::FARCALL64:
2582 case X86::FARJMP16m:
2583 case X86::FARJMP32m:
2584 case X86::FARJMP64:
2585 // We don't need to harden either far calls or far jumps as they are
2586 // safe from Spectre.
2587 return;
2588
2589 default:
2590 break;
14. Execution continues on line 2595
2591 }
2592
2593 // We should never see a loading instruction at this point, as those should
2594 // have been unfolded.
2595 assert(!MI.mayLoad() && "Found a lingering loading instruction!");
2596
2597 // If the first operand isn't a register, this is a branch or call
2598 // instruction with an immediate operand which doesn't need to be hardened.
2599 if (!MI.getOperand(0).isReg())
15. Taking false branch
2600 return;
2601
2602 // For all of these, the target register is the first operand of the
2603 // instruction.
2604 auto &TargetOp = MI.getOperand(0);
2605 unsigned OldTargetReg = TargetOp.getReg();
2606
2607 // Try to lookup a hardened version of this register. We retain a reference
2608 // here as we want to update the map to track any newly computed hardened
2609 // register.
2610 unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
2611
2612 // If we don't have a hardened register yet, compute one. Otherwise, just use
2613 // the already hardened register.
2614 //
2615 // FIXME: It is a little suspect that we use partially hardened registers that
2616 // only feed addresses. The complexity of partial hardening with SHRX
2617 // continues to pile up. Should definitively measure its value and consider
2618 // eliminating it.
2619 if (!HardenedTargetReg)
16. Assuming the condition is true
17. Taking true branch
2620 HardenedTargetReg = hardenValueInRegister(
18. Calling 'X86SpeculativeLoadHardeningPass::hardenValueInRegister'
2621 OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());
2622
2623 // Set the target operand to the hardened register.
2624 TargetOp.setReg(HardenedTargetReg);
2625
2626 ++NumCallsOrJumpsHardened;
2627}
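For example, an indirect jump through a register ends up consuming a hardened copy of its target; schematically (registers illustrative):

  orq  %state, %rax    # hardenValueInRegister applied to the old target
  jmpq *%rax           # a misspeculated state turns the target into all-ones
                       # rather than an attacker-chosen address
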
2628
2629 INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningPass, PASS_KEY,
2630 "X86 speculative load hardener", false, false)
2631 INITIALIZE_PASS_END(X86SpeculativeLoadHardeningPass, PASS_KEY,
2632 "X86 speculative load hardener", false, false)
2633
2634 FunctionPass *llvm::createX86SpeculativeLoadHardeningPass() {
2635 return new X86SpeculativeLoadHardeningPass();
2636}