#define PASS_KEY "x86-slh"
#define DEBUG_TYPE PASS_KEY
STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
STATISTIC(NumAddrRegsHardened,
          "Number of address mode used registers hardened");
STATISTIC(NumPostLoadRegsHardened,
          "Number of post-load register values hardened");
STATISTIC(NumCallsOrJumpsHardened,
          "Number of calls or jumps requiring extra hardening");
STATISTIC(NumInstsInserted, "Number of instructions inserted");
STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
79 "x86-speculative-load-hardening",
86 "Use LFENCE along each conditional edge to harden against speculative "
87 "loads rather than conditional movs and poisoned pointers."),
92 cl::desc(
"Harden the value loaded *after* it is loaded by "
93 "flushing the loaded bits to 1. This is hard to do "
94 "in general but can be done easily for GPRs."),
99 cl::desc(
"Use a full speculation fence to harden both call and ret edges "
100 "rather than a lighter weight mitigation."),
105 cl::desc(
"Harden interprocedurally by passing our state in and out of "
106 "functions in the high bits of the stack pointer."),
111 cl::desc(
"Sanitize loads from memory. When disable, no "
112 "significant security is provided."),
117 cl::desc(
"Harden indirect calls and jumps against using speculatively "
118 "stored attacker controlled addresses. This is designed to "
119 "mitigate Spectre v1.2 style attacks."),
129 return "X86 speculative load hardening";
  /// A block together with its conditional branch terminators and any
  /// trailing unconditional branch.
  struct BlockCondInfo {
    MachineBasicBlock *MBB;
    SmallVector<MachineInstr *, 4> CondBrs;
    MachineInstr *UncondBr;
  };

  /// The predicate state traced through the function.
  struct PredState {
    unsigned InitialReg = 0;
    unsigned PoisonReg = 0;
    // ...
  };
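  // Conceptually, the predicate state is all-zeros on correctly predicted
  // paths and becomes the all-ones poison value under misspeculation; OR-ing
  // it into a pointer then yields a non-canonical address.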
  void mergePredStateIntoSP(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator InsertPt,
                            const DebugLoc &Loc, unsigned PredStateReg);
  // ...
  void hardenIndirectCallOrJumpInstr(
      MachineInstr &MI,
      SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
void X86SpeculativeLoadHardeningPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
}
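// Split the CFG edge from MBB to Succ so that a checking block can sit on the
// edge, updating terminators, successor lists, PHIs, and live-ins as needed.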
static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
                                    MachineBasicBlock &Succ, int SuccCount,
                                    MachineInstr *Br, MachineInstr *&UncondBr,
                                    const X86InstrInfo &TII) {
  assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");
  // ...
  assert(Br->getOperand(0).getMBB() == &Succ &&
         "Didn't start with the right target!");
  // ...
  // If this successor was reached by fallthrough rather than a branch, a new
  // unconditional branch to the old layout successor must be synthesized.
  assert(MBB.isSuccessor(&OldLayoutSucc) &&
         "Without an unconditional branch, the old layout successor should "
         "be an actual successor!");
  // ...
  UncondBr = &*BrBuilder;
  // ...
  assert(!UncondBr &&
         "Cannot have a branchless successor and an unconditional branch!");
  assert(NewMBB.isLayoutSuccessor(&Succ) &&
         "A non-branch successor must have been a layout successor before "
         "and now is a layout successor of the new block.");
  // Update the successor edges: if this was the last edge to Succ, replace it
  // outright; otherwise split the edge's probability with the new block.
  if (SuccCount == 1) {
    MBB.replaceSuccessor(&Succ, &NewMBB);
  } else {
    MBB.splitSuccessor(&Succ, &NewMBB);
  }
  // ...
  // Fix any PHIs in the successor so they account for the new block.
  for (MachineInstr &MI : Succ) {
    if (!MI.isPHI())
      break;
    for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
         OpIdx += 2) {
      MachineOperand &OpV = MI.getOperand(OpIdx);
      MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
      assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
      if (OpMBB.getMBB() != &MBB)
        continue;

      // If this is the last edge to the successor, just replace MBB in the
      // PHI.
      if (SuccCount == 1) {
        OpMBB.setMBB(&NewMBB);
        break;
      }

      // Otherwise, append a new pair of operands for the new incoming edge.
      MI.addOperand(MF, OpV);
      MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
      break;
    }
  }
  // Inherit live-ins from the successor so the new block is valid.
  for (auto &LI : Succ.liveins())
    NewMBB.addLiveIn(LI);

  LLVM_DEBUG(dbgs() << "  Split edge from '" << MBB.getName() << "' to '"
                    << Succ.getName() << "'.\n");

  return NewMBB;
}
/// Remove duplicate PHI operands to leave the PHI in a canonical and
/// predictable form.
static void canonicalizePHIOperands(MachineFunction &MF) {
  SmallPtrSet<MachineBasicBlock *, 4> Preds;
  SmallVector<int, 4> DupIndices;
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!MI.isPHI())
        break;

      // Scan the PHI operands for entries that duplicate an already-seen
      // predecessor, recording the operand index of each duplicate.
      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2)
        if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
          DupIndices.push_back(OpIdx);

      // Remove the duplicates in reverse order so earlier indices stay valid,
      // dropping both the value and the block operand of each pair.
      while (!DupIndices.empty()) {
        int OpIdx = DupIndices.pop_back_val();
        MI.removeOperand(OpIdx + 1);
        MI.removeOperand(OpIdx);
      }

      Preds.clear();
    }
}
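// For example (illustrative MIR), a PHI with a duplicated predecessor entry
//   %phi = PHI %a, %bb.0, %a, %bb.0, %b, %bb.1
// becomes
//   %phi = PHI %a, %bb.0, %b, %bb.1
// so the edge-splitting code can assume exactly one entry per predecessor.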
/// Helper to scan a function for loads vulnerable to misspeculation that we
/// want to harden.
static bool hasVulnerableLoad(MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // Loads after an LFENCE in the same block cannot speculatively execute
      // with an invalid predicate from earlier control flow, so stop scanning
      // this block at the fence.
      if (MI.getOpcode() == X86::LFENCE)
        break;
      // ...
    }
  }
  return false;
}
bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
    MachineFunction &MF) {
  // ...
  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();

  // The predicate state is tracked in a 64-bit GPR (never the stack pointer).
  PS.emplace(MF, &X86::GR64_NOSPRegClass);

  // The simple LFENCE-based mitigation is handled separately.
  if (HardenEdgesWithLFENCE) {
    hardenEdgesWithLFENCE(MF);
    return true;
  }
  MachineBasicBlock &Entry = *MF.begin();
  auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());
  // ...
  // If there are no vulnerable loads and no conditional branches to trace,
  // nothing needs hardening.
  if (!HasVulnerableLoad && Infos.empty())
    return true;
  // The poison value must be all-ones: OR-ing it into a pointer yields a
  // non-canonical address.
  const int PoisonVal = -1;
  PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
      .addImm(PoisonVal);
  ++NumInstsInserted;
  // ...
  // In the fence-based mitigation, suspend any incoming misspeculation with
  // an LFENCE at function entry.
  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
  ++NumInstsInserted;
  ++NumLFENCEsInserted;
  if (HardenInterprocedurally && !FenceCallAndRet) {
    // Pick up any misspeculation state passed in by our caller in the high
    // bits of the stack pointer.
    PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
  } else {
    // Otherwise, build the initial predicate state by zeroing a register.
    PS->InitialReg = MRI->createVirtualRegister(PS->RC);
    Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
    auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
                         PredStateSubReg);
    ++NumInstsInserted;
    MachineOperand *ZeroEFLAGSDefOp =
        ZeroI->findRegisterDefOperand(X86::EFLAGS);
    assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
           "Must have an implicit def of EFLAGS!");
    ZeroEFLAGSDefOp->setIsDead(true);
    BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
            PS->InitialReg)
        .addImm(0)
        .addReg(PredStateSubReg)
        .addImm(X86::sub_32bit);
  }
  // Canonicalize PHIs and seed the SSA updater that carries the predicate
  // state across blocks.
  canonicalizePHIOperands(MF);
  PS->SSA.Initialize(PS->InitialReg);
  PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);

  // Trace the state through every conditional edge, collecting the inserted
  // cmovs for later SSA rewriting.
  auto CMovs = tracePredStateThroughCFG(MF, Infos);
  // ...
  // EH pads are entered with the predicate state in the high bits of the
  // stack pointer, so re-extract it there.
  PS->SSA.AddAvailableValue(
      &MBB, extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()),
                                   Loc));
  // ...
  if (HardenIndirectCallsAndJumps) {
    // Unfold memory-operand calls and jumps so their loads can be hardened
    // independently of the control transfer.
    unfoldCallAndJumpLoads(MF);

    // Then trace the predicate state through the targets of indirect
    // branches.
    auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
    CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());
  }

  // Finally, walk the blocks hardening each load (and the call/return
  // boundaries) against the traced state.
  tracePredStateThroughBlocksAndHarden(MF);
  // Rewrite the cmovs' uses of the initial predicate state into SSA form,
  // inserting PHIs along CFG edges as needed.
  for (MachineInstr *CMovI : CMovs)
    for (MachineOperand &Op : CMovI->operands()) {
      if (!Op.isReg() || Op.getReg() != PS->InitialReg)
        continue;

      PS->SSA.RewriteUse(Op);
    }
  LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n";
             MF.dump(); dbgs() << "\n"; MF.verify(this));
  return true;
}
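// The net effect on each conditional edge is a pattern along these lines
// (register names purely illustrative):
//   cmovneq %r15, %rax   # conditionally poison the state register
//   orq     %rax, %rsi   # later: poison a loaded pointer when misspeculating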
void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(
    MachineFunction &MF) {
  // Collect all the blocks reached along interesting conditional edges.
  SmallSetVector<MachineBasicBlock *, 8> Blocks;
  for (MachineBasicBlock &MBB : MF) {
    // Skip blocks whose terminators don't start with a branch; only branches
    // create the edges we need to fence.
    auto TermIt = MBB.getFirstTerminator();
    if (TermIt == MBB.end() || !TermIt->isBranch())
      continue;

    // Add every non-EH-pad successor; there is no interesting condition on
    // entry to an EH pad.
    for (MachineBasicBlock *SuccMBB : MBB.successors())
      if (!SuccMBB->isEHPad())
        Blocks.insert(SuccMBB);
  }

  // Insert an LFENCE at the start of each collected block.
  for (MachineBasicBlock *MBB : Blocks) {
    auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
    BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
  }
}
SmallVector<X86SpeculativeLoadHardeningPass::BlockCondInfo, 16>
X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {
  SmallVector<BlockCondInfo, 16> Infos;
  for (MachineBasicBlock &MBB : MF) {
    // ...
    BlockCondInfo Info = {&MBB, {}, nullptr};
    // Walk backwards through the terminators, classifying the branches.
    for (MachineInstr &MI : llvm::reverse(MBB)) {
      // Once past the terminators, we're done with this block.
      if (!MI.isTerminator())
        break;

      // A non-branch terminator defeats the analysis; give up on the block.
      if (!MI.isBranch()) {
        Info.CondBrs.clear();
        break;
      }

      // An unconditional branch becomes the "else" successor.
      if (MI.getOpcode() == X86::JMP_1) {
        Info.CondBrs.clear();
        Info.UncondBr = &MI;
        continue;
      }

      // An unanalyzable branch (e.g. indirect) is modeled like an
      // unconditional one so conditional successors can still be guarded.
      if (X86::getCondFromBranch(MI) == X86::COND_INVALID) {
        Info.CondBrs.clear();
        Info.UncondBr = &MI;
        continue;
      }

      // A plain conditional branch: record it.
      Info.CondBrs.push_back(&MI);
    }

    if (Info.CondBrs.empty()) {
      ++NumBranchesUntraced;
      LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
                 MBB.dump());
      continue;
    }

    Infos.push_back(Info);
  }

  return Infos;
}
SmallVector<MachineInstr *, 16>
X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
    MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
  // Collect the inserted cmovs so their pred-state operands can be rewritten
  // into SSA form afterwards.
  SmallVector<MachineInstr *, 16> CMovs;

  for (const BlockCondInfo &Info : Infos) {
    MachineBasicBlock &MBB = *Info.MBB;
    const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
    MachineInstr *UncondBr = Info.UncondBr;

    ++NumCondBranchesTraced;

    // Compute the non-conditional successor: the target of any unconditional
    // branch, or the layout successor when falling through.
    MachineBasicBlock *UncondSucc =
        UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
                        ? UncondBr->getOperand(0).getMBB()
                        : nullptr)
                 : &*std::next(MachineFunction::iterator(&MBB));

    // Count the edges to each successor so we know when an edge must be split
    // versus when the successor block can be reused directly.
    SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
    if (UncondSucc)
      ++SuccCounts[UncondSucc];
    for (auto *CondBr : CondBrs)
      ++SuccCounts[CondBr->getOperand(0).getMBB()];
    // Build (or reuse) a checking block on the edge to Succ and fill it with
    // one cmov per condition code, chaining the predicate state through.
    auto BuildCheckingBlockForSuccAndConds =
        [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
            MachineInstr *Br, MachineInstr *&UncondBr,
            ArrayRef<X86::CondCode> Conds) {
          auto &CheckingMBB =
              (SuccCount == 1 && Succ.pred_size() == 1)
                  ? Succ
                  : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);

          bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
          if (LiveEFLAGS)
            CheckingMBB.addLiveIn(X86::EFLAGS);

          auto InsertPt = CheckingMBB.begin();
          assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
                 "Should never have a PHI in the initial checking block as it "
                 "always has a single predecessor!");

          unsigned CurStateReg = PS->InitialReg;
          for (X86::CondCode Cond : Conds) {
            // ...
            auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
                                 TII->get(CMovOp), UpdatedStateReg)
                             .addReg(CurStateReg)
                             .addReg(PS->PoisonReg)
                             .addImm(Cond);
            // Kill EFLAGS at the last cmov unless they were live-in.
            if (!LiveEFLAGS && Cond == Conds.back())
              CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
            // The first cmov reads the top-level initial state and needs SSA
            // rewriting later.
            if (CurStateReg == PS->InitialReg)
              CMovs.push_back(&*CMovI);
            // The next cmov starts from this one's def.
            CurStateReg = UpdatedStateReg;
          }

          // Publish the block's final state to the SSA updater.
          PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
        };
    // Materialize a check on each conditionally-branched edge; accumulate the
    // branch conditions so the fallthrough edge can check their union.
    std::vector<X86::CondCode> UncondCodeSeq;
    for (auto *CondBr : CondBrs) {
      MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
      int &SuccCount = SuccCounts[&Succ];

      X86::CondCode Cond = X86::getCondFromBranch(*CondBr);
      X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
      UncondCodeSeq.push_back(Cond);

      // On the taken edge, misspeculation means the *inverse* condition held.
      BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
                                        {InvCond});
      --SuccCount;
    }
    // ...
    assert(SuccCounts[UncondSucc] == 1 &&
           "We should never have more than one edge to the unconditional "
           "successor at this point because every other edge must have been "
           "split above!");

    // Sort and unique the condition codes to avoid redundant cmovs.
    llvm::sort(UncondCodeSeq);
    UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
                        UncondCodeSeq.end());

    // Build a checking version of the unconditional successor.
    BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
                                      UncondBr, UncondBr, UncondCodeSeq);
  }

  return CMovs;
}
/// Compute the register class for the unfolded load.
static const TargetRegisterClass *
getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
                           unsigned Opcode) {
  unsigned Index;
  unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
      Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
  const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
  return TII.getRegClass(MCID, Index, &TII.getRegisterInfo(), MF);
}
void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
    MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF)
    // Use an early-incremented range so instructions can be erased while
    // iterating.
    for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
      // Only loading calls and branches are interesting.
      if (!MI.isCall() && !MI.isBranch())
        continue;
      if (!MI.mayLoad())
        continue;

      switch (MI.getOpcode()) {
      default: {
        LLVM_DEBUG(
            dbgs() << "ERROR: Found an unexpected loading branch or call "
                      "instruction:\n";
            MI.dump(); dbgs() << "\n");
        report_fatal_error("Unexpected loading branch or call!");
      }

      case X86::FARCALL16m:
      case X86::FARCALL32m:
      case X86::FARCALL64m:
      case X86::FARJMP16m:
      case X86::FARJMP32m:
      case X86::FARJMP64m:
        // Far calls and jumps cannot be mitigated, but they are also not
        // expected to be vulnerable to Spectre v1.2-style attacks.
        continue;

      case X86::CALL16m:
      case X86::CALL16m_NT:
      case X86::CALL32m:
      case X86::CALL32m_NT:
      case X86::CALL64m:
      case X86::CALL64m_NT:
      case X86::JMP16m:
      case X86::JMP16m_NT:
      case X86::JMP32m:
      case X86::JMP32m_NT:
      case X86::JMP64m:
      case X86::JMP64m_NT:
      case X86::TAILJMPm64:
      case X86::TAILJMPm64_REX:
      case X86::TAILJMPm:
      case X86::TCRETURNmi64:
      case X86::TCRETURNmi: {
        // Unfold the load into a separate instruction.
        const TargetRegisterClass *UnfoldedRC =
            getRegClassForUnfoldedLoad(MF, *TII, MI.getOpcode());
        if (!UnfoldedRC) {
          LLVM_DEBUG(
              dbgs() << "ERROR: Unable to unfold load from instruction:\n";
              MI.dump(); dbgs() << "\n");
          report_fatal_error("Unable to unfold load!");
        }
        Register Reg = MRI->createVirtualRegister(UnfoldedRC);
        SmallVector<MachineInstr *, 2> NewMIs;
        bool Unfolded =
            TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
                                     /*UnfoldStore*/ false, NewMIs);
        (void)Unfolded;
        assert(Unfolded &&
               "Computed unfolded register class but failed to unfold");
        // Stitch the new instructions into place and erase the old one.
        for (auto *NewMI : NewMIs)
          MBB.insert(MI.getIterator(), NewMI);

        // Update the call site info, if any.
        if (MI.isCandidateForCallSiteEntry())
          MF.eraseCallSiteInfo(&MI);

        MI.eraseFromParent();
        LLVM_DEBUG({
          dbgs() << "Unfolded load successfully into:\n";
          for (auto *NewMI : NewMIs) {
            NewMI->dump();
            dbgs() << "\n";
          }
        });
        continue;
      }
      }
    }
}
SmallVector<MachineInstr *, 16>
X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
    MachineFunction &MF) {
  // ...
  // Walk blocks ending in an indirect branch, extracting the target register
  // from each block's final terminator TI.
  switch (TI.getOpcode()) {
  default:
    continue;

  case X86::FARJMP16m:
  case X86::FARJMP32m:
  case X86::FARJMP64m:
    // Far jumps cannot be mitigated, but are not expected to be vulnerable.
    continue;

  case X86::JMP16m:
  case X86::JMP16m_NT:
  case X86::JMP32m:
  case X86::JMP32m_NT:
  case X86::JMP64m:
  case X86::JMP64m_NT:
    // These should have been unfolded by unfoldCallAndJumpLoads already.
    report_fatal_error("Memory operand jumps should have been unfolded!");

  case X86::JMP16r:
    report_fatal_error(
        "Support for 16-bit indirect branches is not implemented.");
  case X86::JMP32r:
    report_fatal_error(
        "Support for 32-bit indirect branches is not implemented.");

  case X86::JMP64r:
    TargetReg = TI.getOperand(0).getReg();
  }
  // Reject blocks that mix an indirect branch with other terminators; this is
  // not yet supported.
  if (llvm::any_of(MBB.terminators(), [&](MachineInstr &OtherTI) {
        return !OtherTI.isDebugInstr() && &OtherTI != &TI;
      })) {
    LLVM_DEBUG({
      dbgs() << "ERROR: Found other terminators in a block with an indirect "
                "branch! This is not yet supported! Terminator sequence:\n";
      for (MachineInstr &MI : MBB.terminators()) {
        MI.dump();
        dbgs() << '\n';
      }
    });
    report_fatal_error("Unimplemented terminator sequence!");
  }

  // Publish the target address for this block and record every successor as
  // a candidate target of an indirect branch.
  TargetAddrSSA.AddAvailableValue(&MBB, TargetReg);
  IndirectTerminatedMBBs.insert(&MBB);
  for (MachineBasicBlock *Succ : MBB.successors())
    IndirectTargetMBBs.insert(Succ);
}
  // If there were no indirect branch targets, nothing to instrument.
  SmallVector<MachineInstr *, 16> CMovs;
  if (IndirectTargetMBBs.empty())
    return CMovs;

  // Instrument each block that may be the target of an indirect branch.
  for (MachineBasicBlock &MBB : MF) {
    if (!IndirectTargetMBBs.count(&MBB))
      continue;

    assert(!MBB.isEHPad() &&
           "Unexpected EH pad as target of an indirect branch!");
    assert(!MBB.isLiveIn(X86::EFLAGS) &&
           "Cannot check within a block that already has live-in EFLAGS!");

    // Every predecessor must either end in an indirect branch (and so has
    // already published the target address) or have this block as its only
    // non-EH successor so the address can be synthesized in the predecessor.
    for (MachineBasicBlock *Pred : MBB.predecessors()) {
      if (IndirectTerminatedMBBs.count(Pred))
        continue;

      if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
            return Succ->isEHPad() || Succ == &MBB;
          })) {
        LLVM_DEBUG({
          dbgs() << "ERROR: Found conditional entry to target of indirect "
                    "branch!\n";
          Pred->dump();
          MBB.dump();
        });
        report_fatal_error("Cannot harden a conditional entry to a target of "
                           "an indirect branch!");
      }
      // Synthesize this block's address at the bottom of the predecessor.
      auto InsertPt = Pred->getFirstTerminator();
      Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
      if (MF.getTarget().getCodeModel() == CodeModel::Small &&
          !Subtarget->isPositionIndependent()) {
        // In the small, static code model the address fits a sign-extended
        // 32-bit immediate.
        BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::MOV64ri32),
                TargetReg)
            .addMBB(&MBB);
        ++NumInstsInserted;
      } else {
        // Otherwise materialize the address with a RIP-relative LEA.
        // ...
      }
      TargetAddrSSA.AddAvailableValue(Pred, TargetReg);
    }

    // Compare the address this block was actually reached through against its
    // own address, poisoning the predicate state on a mismatch.
    Register TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
    // ...
    if (MF.getTarget().getCodeModel() == CodeModel::Small &&
        !Subtarget->isPositionIndependent()) {
      // Compare directly against an immediate when possible.
      // ...
    }
    // ...
    CMovs.push_back(&*CMovI);
    PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
  }

  return CMovs;
}
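// The emitted check at an indirect branch target looks roughly like this
// (register names illustrative):
//   leaq  .Ltarget(%rip), %rcx   # or movq $.Ltarget in the static model
//   cmpq  %rcx, %rdx             # %rdx = address the branch actually used
//   cmovneq %r15, %rax           # poison the state if we arrived incorrectly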
// Returns true if the instruction has a live EFLAGS def operand.
static bool isEFLAGSDefLive(const MachineInstr &MI) {
  if (const MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
    return !DefOp->isDead();
  }
  return false;
}

static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                         const TargetRegisterInfo &TRI) {
  // Scan backwards for a def of EFLAGS (live unless the def is dead) or a
  // kill (definitely not live).
  for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
    if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
      if (DefOp->isDead())
        return false;
      return true;
    }
    if (MI.killsRegister(X86::EFLAGS, &TRI))
      return false;
  }

  // Nothing conclusive found; fall back to whether EFLAGS live into the
  // block.
  return MBB.isLiveIn(X86::EFLAGS);
}
void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
    MachineFunction &MF) {
  // ...
      // Treat every def of an instruction with a load-dependent operand as
      // itself load-dependent.
      if (llvm::any_of(MI.operands(), [&](MachineOperand &Op) {
            return Op.isReg() && LoadDepRegs.test(Op.getReg());
          }))
        for (MachineOperand &Def : MI.defs())
          if (Def.isReg())
            LoadDepRegs.set(Def.getReg());

      // Both Intel and AMD document LFENCE as a speculation barrier, so
      // nothing after an LFENCE in this block needs hardening.
      if (MI.getOpcode() == X86::LFENCE)
        break;
      // ...
      // Find the memory operand; some loads (currently) can't be hardened.
      const MCInstrDesc &Desc = MI.getDesc();
      int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
      if (MemRefBeginIdx < 0) {
        LLVM_DEBUG(dbgs()
                       << "WARNING: unable to harden loading instruction: ";
                   MI.dump());
        continue;
      }
      MemRefBeginIdx += X86II::getOperandBias(Desc);

      MachineOperand &BaseMO =
          MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
      MachineOperand &IndexMO =
          MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);

      // Extract the base and index registers; frame indices, RIP-relative
      // bases, and absent registers need no hardening.
      unsigned BaseReg = 0, IndexReg = 0;
      if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
          BaseMO.getReg() != X86::NoRegister)
        BaseReg = BaseMO.getReg();
      if (IndexMO.getReg() != X86::NoRegister)
        IndexReg = IndexMO.getReg();

      if (!BaseReg && !IndexReg)
        // No registers to harden.
        continue;

      // Skip addresses that depend on an earlier load in this block; the
      // earlier hardening is treated as covering them.
      if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
          (IndexReg && LoadDepRegs.test(IndexReg)))
        continue;
      // Post-load hardening applies only to data-invariant, single-def GPR
      // loads whose address registers aren't already being hardened.
      if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
          !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
          MI.getOperand(0).isReg() &&
          canHardenRegister(MI.getOperand(0).getReg()) &&
          !HardenedAddrRegs.count(BaseReg) &&
          !HardenedAddrRegs.count(IndexReg)) {
        HardenPostLoad.insert(&MI);
        HardenedAddrRegs.insert(MI.getOperand(0).getReg());
        continue;
      }

      // Otherwise harden the address, recording the address registers so
      // later instructions know they're covered.
      HardenLoadAddr.insert(&MI);
      if (BaseReg)
        HardenedAddrRegs.insert(BaseReg);
      if (IndexReg)
        HardenedAddrRegs.insert(IndexReg);

      for (MachineOperand &Def : MI.defs())
        if (Def.isReg())
          LoadDepRegs.set(Def.getReg());
1400 "Requested to harden both the address and def of a load!");
1403 if (HardenLoadAddr.
erase(&
MI)) {
1406 assert(MemRefBeginIdx >= 0 &&
"Cannot have an invalid index here!");
1414 hardenLoadAddr(
MI, BaseMO, IndexMO, AddrRegToHardenedReg);
      // Check if this is one of the instructions to post-load harden.
      if (HardenPostLoad.erase(&MI)) {
        assert(!MI.isCall() && "Must not try to post-load harden a call!");

        // For a data-invariant load, sink the hardening as far down the use
        // chain as possible.
        if (X86InstrInfo::isDataInvariantLoad(MI) && !isEFLAGSDefLive(MI)) {
          MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);

          // If the hardening moved, harden the sunk instruction when the walk
          // reaches it instead.
          if (SunkMI != &MI) {
            if (!SunkMI)
              continue;
            HardenPostLoad.insert(SunkMI);
            continue;
          }
        }

        unsigned HardenedReg = hardenPostLoad(MI);

        // The hardened def now serves as its own hardened address register.
        AddrRegToHardenedReg[HardenedReg] = HardenedReg;

        continue;
      }
      // Harden the target of any surviving indirect call or jump.
      if (HardenIndirectCallsAndJumps)
        hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
      // ...
      // The rest handles the interprocedural boundary at calls and returns.
      if (!MI.isCall() && !MI.isReturn())
        continue;

      // A plain return (not a tail call) merges the state into RSP.
      if (MI.isReturn() && !MI.isCall()) {
        hardenReturnInstr(MI);
        continue;
      }

      // Otherwise this is a call: pass the state to the callee and check it
      // when the call returns.
      assert(MI.isCall() && "Should only reach here for calls!");
      tracePredStateThroughCall(MI);
    // Reset the per-block state.
    HardenPostLoad.clear();
    HardenLoadAddr.clear();
    HardenedAddrRegs.clear();
    AddrRegToHardenedReg.clear();

    // Load-dependence is currently only tracked within a basic block.
    LoadDepRegs.clear();
  }
}
unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    const DebugLoc &Loc) {
  // Copy EFLAGS into a GPR, relying on later lowering to clean this up.
  // ...
}

void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    const DebugLoc &Loc, unsigned Reg) {
  // Copy the saved value back into EFLAGS.
  // ...
}
void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    const DebugLoc &Loc, unsigned PredStateReg) {
  Register TmpReg = MRI->createVirtualRegister(PS->RC);
  // Shift the state into the high bits (keeping pointers canonical) and OR it
  // into the stack pointer; both instructions leave EFLAGS dead.
  auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
                    .addReg(PredStateReg, RegState::Kill)
                    .addImm(47);
  ShiftI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;
  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
                 .addReg(X86::RSP)
                 .addReg(TmpReg, RegState::Kill);
  OrI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;
}
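// In assembly this is the familiar SLH call-boundary pattern (sketch):
//   shlq $47, %rax      # move the state into the top bits of a pointer
//   orq  %rax, %rsp     # a poisoned state makes RSP non-canonical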
unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    const DebugLoc &Loc) {
  Register PredStateReg = MRI->createVirtualRegister(PS->RC);
  Register TmpReg = MRI->createVirtualRegister(PS->RC);

  // The predicate state lives in the high bit of RSP; an arithmetic right
  // shift smears it across every bit of the state register.
  BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
      .addReg(X86::RSP);
  auto ShiftI =
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
          .addReg(TmpReg, RegState::Kill)
          .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
  ShiftI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;

  return PredStateReg;
}
void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
    MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &Loc = MI.getDebugLoc();

  // Collect the address registers that actually need hardening.
  SmallVector<MachineOperand *, 2> HardenOpRegs;

  if (BaseMO.isFI()) {
    // A frame index is never dynamically controllable.
    LLVM_DEBUG(
        dbgs() << "  Skipping hardening base of explicit stack frame load: ";
        MI.dump(); dbgs() << "\n");
  } else if (BaseMO.getReg() == X86::RSP) {
    // Explicit RSP-relative accesses use a fixed offset and need no
    // hardening.
    assert(IndexMO.getReg() == X86::NoRegister &&
           "Explicit RSP access with dynamic index!");
    LLVM_DEBUG(
        dbgs() << "  Cannot harden base of explicit RSP offset in a load!");
  } else if (BaseMO.getReg() == X86::RIP ||
             BaseMO.getReg() == X86::NoRegister) {
    // RIP-relative and absolute addresses are not generated from a register,
    // so there is nothing to poison.
    LLVM_DEBUG(
        dbgs() << "  Cannot harden base of "
               << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
               << " address in a load!");
  } else {
    assert(BaseMO.isReg() &&
           "Only allowed to have a frame index or register base.");
    HardenOpRegs.push_back(&BaseMO);
  }

  if (IndexMO.getReg() != X86::NoRegister &&
      (HardenOpRegs.empty() ||
       HardenOpRegs.front()->getReg() != IndexMO.getReg()))
    HardenOpRegs.push_back(&IndexMO);

  assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
         "Should have exactly one or two registers to harden!");
  assert((HardenOpRegs.size() == 1 ||
          HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
         "Should not have two of the same registers!");

  // Drop registers that were already hardened, rewriting the operands to use
  // the hardened value.
  llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
    auto It = AddrRegToHardenedReg.find(Op->getReg());
    if (It == AddrRegToHardenedReg.end())
      return false;

    Op->setReg(It->second);
    return true;
  });
  if (HardenOpRegs.empty())
    return;
  // Compute the current predicate state.
  Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);

  auto InsertPt = MI.getIterator();

  // Save EFLAGS around the hardening if they are live and BMI2's SHRX (which
  // leaves flags untouched) is unavailable.
  bool EFLAGSLive = isEFLAGSLive(MBB, InsertPt, *TRI);
  unsigned FlagsReg = 0;
  if (EFLAGSLive && !Subtarget->hasBMI2()) {
    EFLAGSLive = false;
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
  }
  for (MachineOperand *Op : HardenOpRegs) {
    Register OpReg = Op->getReg();
    auto *OpRC = MRI->getRegClass(OpReg);
    Register TmpReg = MRI->createVirtualRegister(OpRC);

    // Vector-register bases and indices (used by gathers) need the state
    // broadcast across the vector before it can be OR-ed in.
    if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
                                 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
      assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);

      // Move the state into a vector register and broadcast it.
      Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
          .addReg(StateReg);
      ++NumInstsInserted;
      Register VBStateReg = MRI->createVirtualRegister(OpRC);
      auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
                                TII->get(Is128Bit ? X86::VPBROADCASTQrr
                                                  : X86::VPBROADCASTQYrr),
                                VBStateReg)
                            .addReg(VStateReg);
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
                 dbgs() << "\n");

      // Merge the potential poison state into the value with a vector or.
      auto OrI =
          BuildMI(MBB, InsertPt, Loc,
                  TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
              .addReg(VBStateReg)
              .addReg(OpReg);
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
1704 OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
1705 OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
1706 assert(Subtarget->hasAVX512() &&
"AVX512-specific register classes!");
1707 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
1708 bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
1710 assert(Subtarget->hasVLX() &&
"AVX512VL-specific register classes!");
1714 unsigned BroadcastOp =
Is128Bit ? X86::VPBROADCASTQrZ128rr
1715 : Is256Bit ? X86::VPBROADCASTQrZ256rr
1716 : X86::VPBROADCASTQrZrr;
1718 BuildMI(
MBB, InsertPt, Loc,
TII->get(BroadcastOp), VStateReg)
1722 LLVM_DEBUG(
dbgs() <<
" Inserting broadcast: "; BroadcastI->dump();
1726 unsigned OrOp =
Is128Bit ? X86::VPORQZ128rr
1727 : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
1728 auto OrI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(OrOp), TmpReg)
    } else {
      assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
             "Not a supported register class for address hardening!");

      if (!EFLAGSLive) {
        // Merge the potential poison state into the value with an or.
        auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
                       .addReg(StateReg)
                       .addReg(OpReg);
        OrI->addRegisterDead(X86::EFLAGS, TRI);
        ++NumInstsInserted;
        LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
      } else {
        // EFLAGS are live, so use SHRX, which doesn't touch flags: a shift
        // count of 0 leaves the pointer intact, while the all-ones state
        // (63 mod 64) destroys it.
        auto ShiftI =
            BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
                .addReg(OpReg)
                .addReg(StateReg);
        ++NumInstsInserted;
        LLVM_DEBUG(dbgs() << "  Inserting shrx: "; ShiftI->dump();
                   dbgs() << "\n");
      }
    }

    // Record the operand's register as checked and rewrite it.
    assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
           "Should not have checked this register yet!");
    AddrRegToHardenedReg[Op->getReg()] = TmpReg;
    Op->setReg(TmpReg);
    ++NumAddrRegsHardened;
  }

  // Restore EFLAGS if they were saved above.
  if (FlagsReg)
    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
}
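// For a GPR address this emits, e.g. (illustrative registers):
//   orq  %rax, %rsi      # %rax = predicate state, %rsi = pointer
//   movl (%rsi), %edi    # the load now faults/stalls under misspeculation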
MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
    MachineInstr &MI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
  assert(X86InstrInfo::isDataInvariantLoad(MI) &&
         "Cannot get here with a non-invariant load!");
  assert(!isEFLAGSDefLive(MI) &&
         "Cannot get here with a data invariant load "
         "that interferes with EFLAGS!");

  // Find the single use (if any) to which the hardening can be sunk.
  auto SinkCheckToSingleUse =
      [&](MachineInstr &MI) -> Optional<MachineInstr *> {
    Register DefReg = MI.getOperand(0).getReg();
    MachineInstr *SingleUseMI = nullptr;
    for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
      if (HardenedInstrs.count(&UseMI)) {
        // Non-load uses already scheduled for hardening must themselves be
        // data invariant.
        if (!X86InstrInfo::isDataInvariantLoad(UseMI) ||
            isEFLAGSDefLive(UseMI)) {
          assert(X86InstrInfo::isDataInvariant(UseMI) &&
                 "Data variant instruction being hardened!");
          continue;
        }

        // For a hardened load, bail if our value feeds its address.
        const MCInstrDesc &Desc = UseMI.getDesc();
        int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
        assert(MemRefBeginIdx >= 0 &&
               "Should always have mem references here!");
        MemRefBeginIdx += X86II::getOperandBias(Desc);
        MachineOperand &BaseMO =
            UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
        MachineOperand &IndexMO =
            UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
        if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
            (IndexMO.isReg() && IndexMO.getReg() == DefReg))
          return {};
        continue;
      }

      // Otherwise this must be the unique use, must not define more than one
      // register, and must define a hardenable virtual register.
      if (SingleUseMI)
        return {};
      if (UseMI.getDesc().getNumDefs() > 1)
        return {};
      Register UseDefReg = UseMI.getOperand(0).getReg();
      if (!UseDefReg.isVirtual() || !canHardenRegister(UseDefReg))
        return {};

      SingleUseMI = &UseMI;
    }

    // A null value here means no use needed its own hardening.
    return {SingleUseMI};
  };
  // ...
}
bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
  auto *RC = MRI->getRegClass(Reg);
  int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
  if (RegBytes > 8)
    // We don't support post-load hardening of vectors.
    return false;

  unsigned RegIdx = Log2_32(RegBytes);
  assert(RegIdx < 4 && "Unsupported register size");

  // Registers constrained to a NOREX class can't take the REX-prefixed
  // hardening instructions, so bail on those.
  const TargetRegisterClass *NOREXRegClasses[] = {
      &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
      &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
  if (RC == NOREXRegClasses[RegIdx])
    return false;

  const TargetRegisterClass *GPRRegClasses[] = {
      &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
      &X86::GR64RegClass};
  return RC->hasSuperClassEq(GPRRegClasses[RegIdx]);
}
unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
    Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    const DebugLoc &Loc) {
  assert(canHardenRegister(Reg) && "Cannot harden this register!");
  assert(Reg.isVirtual() && "Cannot harden a physical register!");

  auto *RC = MRI->getRegClass(Reg);
  int Bytes = TRI->getRegSizeInBits(*RC) / 8;
  Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  assert((Bytes == 1 || Bytes == 2 || Bytes == 4 || Bytes == 8) &&
         "Unknown register size");

  // For narrower registers, OR against the matching subregister of the state.
  if (Bytes != 8) {
    unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
    unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
    Register NarrowStateReg = MRI->createVirtualRegister(RC);
    BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
        .addReg(StateReg, 0, SubRegImm);
    StateReg = NarrowStateReg;
  }

  unsigned FlagsReg = 0;
  if (isEFLAGSLive(MBB, InsertPt, *TRI))
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);

  Register NewReg = MRI->createVirtualRegister(RC);
  unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
  unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
                 .addReg(StateReg)
                 .addReg(Reg);
  OrI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;
  LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");

  if (FlagsReg)
    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);

  return NewReg;
}
unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &Loc = MI.getDebugLoc();

  auto &DefOp = MI.getOperand(0);
  Register OldDefReg = DefOp.getReg();
  auto *DefRC = MRI->getRegClass(OldDefReg);

  // Redirect the load's def through a fresh register so the hardened value
  // can fully replace the original def's uses.
  Register UnhardenedReg = MRI->createVirtualRegister(DefRC);
  DefOp.setReg(UnhardenedReg);

  // Harden the loaded value immediately after the load.
  unsigned HardenedReg = hardenValueInRegister(
      UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);

  // Swap all remaining uses over to the hardened register.
  MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);

  ++NumPostLoadRegsHardened;
  return HardenedReg;
}
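// Post-load hardening ORs the *loaded value* rather than the address, e.g.
// (illustrative registers):
//   movq (%rsi), %rax
//   orq  %rcx, %rax     # %rcx = predicate state; flushes the value to -1
// so a misspeculated load yields an all-ones value instead of a secret.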
void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &Loc = MI.getDebugLoc();
  auto InsertPt = MI.getIterator();
  // ...
  // Merge the predicate state into the high bits of RSP so the caller can
  // extract it after the return (even a speculative one).
  mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
}
void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
    MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  auto InsertPt = MI.getIterator();
  const DebugLoc &Loc = MI.getDebugLoc();

  if (FenceCallAndRet) {
    // ...
    // Fence after the call returns; the callee fences its own entry.
    BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
    return;
  }

  // Transfer the predicate state into the callee by merging it into RSP.
  Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
  // ...
  // Create a symbol for the return address and attach it to the call
  // instruction; it lowers to a label immediately after the call.
  MCSymbol *RetSymbol =
      MF.getContext().createTempSymbol("slh_ret_addr",
                                       /*AlwaysAddSuffix*/ true);
  MI.setPostInstrSymbol(MF, RetSymbol);

  const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
  unsigned ExpectedRetAddrReg = 0;

  // Without a red zone (or in a returns-twice function like one calling
  // setjmp), the expected return address must be computed into a register
  // before the call.
  if (!Subtarget->getFrameLowering()->has128ByteRedZone(MF) ||
      MF.exposesReturnsTwice()) {
    ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
    if (MF.getTarget().getCodeModel() == CodeModel::Small &&
        !Subtarget->isPositionIndependent()) {
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
          .addSym(RetSymbol);
    } else {
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)
          .addReg(/*Base*/ X86::RIP)
          .addImm(/*Scale*/ 1)
          .addReg(/*Index*/ 0)
          .addSym(RetSymbol)
          .addReg(/*Segment*/ 0);
    }
  }

  // Step past the call to instrument the return path.
  ++InsertPt;

  // With a red zone, the return address is still on the stack just below RSP
  // after the call, so load it from there instead.
  if (!ExpectedRetAddrReg) {
    ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)
        .addReg(/*Base*/ X86::RSP)
        .addImm(/*Scale*/ 1)
        .addReg(/*Index*/ 0)
        .addImm(/*Displacement*/ -8)
        .addReg(/*Segment*/ 0);
  }

  // Extract the predicate state handed back by the callee from RSP.
  unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
  // Compare the expected return address against the actual one, poisoning
  // the fresh state if the return was mispredicted.
  if (MF.getTarget().getCodeModel() == CodeModel::Small &&
      !Subtarget->isPositionIndependent()) {
    // The address fits an immediate, so compare directly against the symbol.
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))
        .addReg(ExpectedRetAddrReg, RegState::Kill)
        .addSym(RetSymbol);
  } else {
    Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
        .addReg(/*Base*/ X86::RIP)
        .addImm(/*Scale*/ 1)
        .addReg(/*Index*/ 0)
        .addSym(RetSymbol)
        .addReg(/*Segment*/ 0);
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))
        .addReg(ExpectedRetAddrReg, RegState::Kill)
        .addReg(ActualRetAddrReg, RegState::Kill);
  }

  // cmov the poison value over the extracted state on a mismatch.
  int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
  auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
  Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
  auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
                   .addReg(NewStateReg, RegState::Kill)
                   .addReg(PS->PoisonReg)
                   .addImm(X86::COND_NE);
  CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
  ++NumInstsInserted;

  PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
}
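// Around a call this produces, roughly (illustrative sketch):
//   shlq $47, %rax; orq %rax, %rsp    # pass state to the callee
//   callq callee
// .Lslh_ret_addr0:
//   movq %rsp, %rax; sarq $63, %rax   # recover state from RSP
//   cmpq $.Lslh_ret_addr0, %rcx       # expected vs. actual return address
//   cmovneq %r15, %rax                # poison if the return was mispredicted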
void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
    MachineInstr &MI,
    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
  switch (MI.getOpcode()) {
  case X86::FARCALL16m:
  case X86::FARCALL32m:
  case X86::FARCALL64m:
  case X86::FARJMP16m:
  case X86::FARJMP32m:
  case X86::FARJMP64m:
    // Far calls and jumps don't need hardening.
    return;

  default:
    break;
  }

  // Loading variants should all have been unfolded by now.
  assert(!MI.mayLoad() && "Found a lingering loading instruction!");

  // If the first operand isn't a register, this is a branch or call with an
  // immediate operand which doesn't need hardening.
  if (!MI.getOperand(0).isReg())
    return;
  // The target register is the first operand of the instruction.
  auto &TargetOp = MI.getOperand(0);
  Register OldTargetReg = TargetOp.getReg();

  // Look up (and cache) a hardened version of this register.
  unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];

  // If there is no hardened register yet, compute one; otherwise reuse it.
  if (!HardenedTargetReg)
    HardenedTargetReg = hardenValueInRegister(
        OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());

  // Rewrite the target operand to the hardened register.
  TargetOp.setReg(HardenedTargetReg);

  ++NumCallsOrJumpsHardened;
}
2272 "X86 speculative load hardener",
false,
false)
2277 return new X86SpeculativeLoadHardeningPass();