60#define PASS_KEY "x86-slh"
61#define DEBUG_TYPE PASS_KEY
63STATISTIC(NumCondBranchesTraced,
"Number of conditional branches traced");
64STATISTIC(NumBranchesUntraced,
"Number of branches unable to trace");
66 "Number of address mode used registers hardaned");
68 "Number of post-load register values hardened");
70 "Number of calls or jumps requiring extra hardening");
71STATISTIC(NumInstsInserted,
"Number of instructions inserted");
72STATISTIC(NumLFENCEsInserted,
"Number of lfence instructions inserted");
75 "x86-speculative-load-hardening",
82 "Use LFENCE along each conditional edge to harden against speculative "
83 "loads rather than conditional movs and poisoned pointers."),
88 cl::desc(
"Harden the value loaded *after* it is loaded by "
89 "flushing the loaded bits to 1. This is hard to do "
90 "in general but can be done easily for GPRs."),
95 cl::desc(
"Use a full speculation fence to harden both call and ret edges "
96 "rather than a lighter weight mitigation."),
101 cl::desc(
"Harden interprocedurally by passing our state in and out of "
102 "functions in the high bits of the stack pointer."),
107 cl::desc(
"Sanitize loads from memory. When disable, no "
108 "significant security is provided."),
113 cl::desc(
"Harden indirect calls and jumps against using speculatively "
114 "stored attacker controlled addresses. This is designed to "
115 "mitigate Spectre v1.2 style attacks."),
120constexpr StringRef X86SLHPassName =
"X86 speculative load hardening";
126 StringRef getPassName()
const override {
return X86SLHPassName; }
127 bool runOnMachineFunction(MachineFunction &MF)
override;
128 void getAnalysisUsage(AnalysisUsage &AU)
const override;
134class X86SpeculativeLoadHardeningImpl {
136 X86SpeculativeLoadHardeningImpl() =
default;
138 bool run(MachineFunction &MF);
143 struct BlockCondInfo {
144 MachineBasicBlock *MBB;
148 SmallVector<MachineInstr *, 2> CondBrs;
150 MachineInstr *UncondBr;
158 const TargetRegisterClass *RC;
159 MachineSSAUpdater SSA;
161 PredState(MachineFunction &MF,
const TargetRegisterClass *RC)
165 const X86Subtarget *Subtarget =
nullptr;
166 MachineRegisterInfo *MRI =
nullptr;
167 const X86InstrInfo *TII =
nullptr;
168 const TargetRegisterInfo *TRI =
nullptr;
170 std::optional<PredState> PS;
172 void hardenEdgesWithLFENCE(MachineFunction &MF);
179 void unfoldCallAndJumpLoads(MachineFunction &MF);
182 tracePredStateThroughIndirectBranches(MachineFunction &MF);
184 void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);
189 void restoreEFLAGS(MachineBasicBlock &
MBB,
193 void mergePredStateIntoSP(MachineBasicBlock &
MBB,
196 Register extractPredStateFromSP(MachineBasicBlock &
MBB,
201 hardenLoadAddr(MachineInstr &
MI, MachineOperand &BaseMO,
202 MachineOperand &IndexMO,
203 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
205 sinkPostLoadHardenedInst(MachineInstr &
MI,
206 SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
212 void hardenReturnInstr(MachineInstr &
MI);
213 void tracePredStateThroughCall(MachineInstr &
MI);
214 void hardenIndirectCallOrJumpInstr(
216 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
221bool X86SpeculativeLoadHardeningLegacy::runOnMachineFunction(
223 X86SpeculativeLoadHardeningImpl Impl;
230char X86SpeculativeLoadHardeningLegacy::ID = 0;
232void X86SpeculativeLoadHardeningLegacy::getAnalysisUsage(
233 AnalysisUsage &AU)
const {
241 assert(!Succ.
isEHPad() &&
"Shouldn't get edges to EH pads!");
255 "Didn't start with the right target!");
264 assert(
MBB.isSuccessor(&OldLayoutSucc) &&
265 "Without an unconditional branch, the old layout successor should "
266 "be an actual successor!");
270 UncondBr = &*BrBuilder;
281 "Cannot have a branchless successor and an unconditional branch!");
283 "A non-branch successor must have been a layout successor before "
284 "and now is a layout successor of the new block.");
290 if (SuccCount == 1) {
291 MBB.replaceSuccessor(&Succ, &NewMBB);
293 MBB.splitSuccessor(&Succ, &NewMBB);
307 assert(OpMBB.
isMBB() &&
"Block operand to a PHI is not a block!");
312 if (SuccCount == 1) {
318 MI.addOperand(MF, OpV);
325 for (
auto &LI : Succ.
liveins())
345 for (
auto &
MI :
MBB) {
368 while (!DupIndices.
empty()) {
391 if (
MI.getOpcode() == X86::LFENCE)
399 if (
MI.getOpcode() == X86::MFENCE)
411bool X86SpeculativeLoadHardeningImpl::run(MachineFunction &MF) {
427 PS.emplace(MF, &X86::GR64_NOSPRegClass);
435 hardenEdgesWithLFENCE(MF);
443 auto EntryInsertPt =
Entry.SkipPHIsLabelsAndDebug(
Entry.begin());
453 if (!HasVulnerableLoad && Infos.
empty())
458 const int PoisonVal = -1;
459 PS->PoisonReg =
MRI->createVirtualRegister(PS->RC);
460 BuildMI(Entry, EntryInsertPt, Loc,
TII->get(X86::MOV64ri32), PS->PoisonReg)
473 BuildMI(Entry, EntryInsertPt, Loc,
TII->get(X86::LFENCE));
475 ++NumLFENCEsInserted;
488 PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
492 PS->InitialReg =
MRI->createVirtualRegister(PS->RC);
493 Register PredStateSubReg =
MRI->createVirtualRegister(&X86::GR32RegClass);
494 auto ZeroI =
BuildMI(Entry, EntryInsertPt, Loc,
TII->get(X86::MOV32r0),
497 MachineOperand *ZeroEFLAGSDefOp =
498 ZeroI->findRegisterDefOperand(X86::EFLAGS,
nullptr);
500 "Must have an implicit def of EFLAGS!");
502 BuildMI(Entry, EntryInsertPt, Loc,
TII->get(X86::SUBREG_TO_REG),
515 PS->SSA.Initialize(PS->InitialReg);
516 PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
519 auto CMovs = tracePredStateThroughCFG(MF, Infos);
529 for (MachineBasicBlock &
MBB : MF) {
535 PS->SSA.AddAvailableValue(
544 unfoldCallAndJumpLoads(MF);
547 auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
548 CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());
554 tracePredStateThroughBlocksAndHarden(MF);
558 for (MachineInstr *CMovI : CMovs)
559 for (MachineOperand &
Op : CMovI->operands()) {
560 if (!
Op.isReg() ||
Op.getReg() != PS->InitialReg)
563 PS->SSA.RewriteUse(
Op);
575void X86SpeculativeLoadHardeningImpl::hardenEdgesWithLFENCE(
576 MachineFunction &MF) {
579 SmallSetVector<MachineBasicBlock *, 8> Blocks;
580 for (MachineBasicBlock &
MBB : MF) {
588 if (TermIt ==
MBB.
end() || !TermIt->isBranch())
595 if (!SuccMBB->isEHPad())
599 for (MachineBasicBlock *
MBB : Blocks) {
603 ++NumLFENCEsInserted;
608X86SpeculativeLoadHardeningImpl::collectBlockCondInfo(MachineFunction &MF) {
613 for (MachineBasicBlock &
MBB : MF) {
638 BlockCondInfo
Info = {&
MBB, {},
nullptr};
644 if (!
MI.isTerminator())
648 if (!
MI.isBranch()) {
649 Info.CondBrs.clear();
655 if (
MI.getOpcode() == X86::JMP_1) {
656 Info.CondBrs.clear();
671 Info.CondBrs.clear();
677 Info.CondBrs.push_back(&
MI);
679 if (
Info.CondBrs.empty()) {
680 ++NumBranchesUntraced;
681 LLVM_DEBUG(
dbgs() <<
"WARNING: unable to secure successors of block:\n";
700X86SpeculativeLoadHardeningImpl::tracePredStateThroughCFG(
708 for (
const BlockCondInfo &Info : Infos) {
709 MachineBasicBlock &
MBB = *
Info.MBB;
710 const SmallVectorImpl<MachineInstr *> &CondBrs =
Info.CondBrs;
711 MachineInstr *UncondBr =
Info.UncondBr;
715 ++NumCondBranchesTraced;
719 MachineBasicBlock *UncondSucc =
720 UncondBr ? (UncondBr->
getOpcode() == X86::JMP_1
726 SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
728 ++SuccCounts[UncondSucc];
729 for (
auto *CondBr : CondBrs)
730 ++SuccCounts[CondBr->getOperand(0).getMBB()];
734 auto BuildCheckingBlockForSuccAndConds =
735 [&](MachineBasicBlock &
MBB, MachineBasicBlock &Succ,
int SuccCount,
736 MachineInstr *Br, MachineInstr *&UncondBr,
741 (SuccCount == 1 && Succ.pred_size() == 1)
743 : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);
745 bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
747 CheckingMBB.addLiveIn(X86::EFLAGS);
750 auto InsertPt = CheckingMBB.begin();
751 assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
752 "Should never have a PHI in the initial checking block as it "
753 "always has a single predecessor!");
757 Register CurStateReg = PS->InitialReg;
759 for (X86::CondCode Cond : Conds) {
760 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
761 auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
763 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
766 auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
767 TII->get(CMovOp), UpdatedStateReg)
769 .addReg(PS->PoisonReg)
773 if (!LiveEFLAGS && Cond == Conds.back())
774 CMovI->findRegisterUseOperand(X86::EFLAGS, nullptr)
778 LLVM_DEBUG(dbgs() <<
" Inserting cmov: "; CMovI->dump();
783 if (CurStateReg == PS->InitialReg)
784 CMovs.push_back(&*CMovI);
787 CurStateReg = UpdatedStateReg;
792 PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
795 std::vector<X86::CondCode> UncondCodeSeq;
796 for (
auto *CondBr : CondBrs) {
797 MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
798 int &SuccCount = SuccCounts[&Succ];
802 UncondCodeSeq.push_back(
Cond);
804 BuildCheckingBlockForSuccAndConds(
MBB, Succ, SuccCount, CondBr, UncondBr,
827 assert(SuccCounts[UncondSucc] == 1 &&
828 "We should never have more than one edge to the unconditional "
829 "successor at this point because every other edge must have been "
834 UncondCodeSeq.erase(
llvm::unique(UncondCodeSeq), UncondCodeSeq.end());
837 BuildCheckingBlockForSuccAndConds(
MBB, *UncondSucc, 1,
838 UncondBr, UncondBr, UncondCodeSeq);
849static const TargetRegisterClass *
852 unsigned UnfoldedOpc =
TII.getOpcodeAfterMemoryUnfold(
853 Opcode,
true,
false, &Index);
855 return TII.getRegClass(
MCID, Index);
858void X86SpeculativeLoadHardeningImpl::unfoldCallAndJumpLoads(
859 MachineFunction &MF) {
860 for (MachineBasicBlock &
MBB : MF)
865 if (!
MI.isCall() && !
MI.isBranch())
871 switch (
MI.getOpcode()) {
874 dbgs() <<
"ERROR: Found an unexpected loading branch or call "
880 case X86::FARCALL16m:
881 case X86::FARCALL32m:
882 case X86::FARCALL64m:
891 case X86::CALL16m_NT:
893 case X86::CALL32m_NT:
895 case X86::CALL64m_NT:
902 case X86::TAILJMPm64:
903 case X86::TAILJMPm64_REX:
905 case X86::TCRETURNmi64:
906 case X86::TCRETURN_WINmi64:
907 case X86::TCRETURNmi: {
914 <<
"ERROR: Unable to unfold load from instruction:\n";
919 SmallVector<MachineInstr *, 2> NewMIs;
923 TII->unfoldMemoryOperand(MF,
MI,
Reg,
true,
927 "Computed unfolded register class but failed to unfold");
929 for (
auto *NewMI : NewMIs)
933 if (
MI.isCandidateForAdditionalCallInfo())
934 MF.eraseAdditionalCallInfo(&
MI);
936 MI.eraseFromParent();
938 dbgs() <<
"Unfolded load successfully into:\n";
939 for (
auto *NewMI : NewMIs) {
969X86SpeculativeLoadHardeningImpl::tracePredStateThroughIndirectBranches(
970 MachineFunction &MF) {
975 MachineSSAUpdater TargetAddrSSA(MF);
976 TargetAddrSSA.Initialize(
MRI->createVirtualRegister(&X86::GR64RegClass));
979 SmallPtrSet<MachineBasicBlock *, 4> IndirectTerminatedMBBs;
984 SmallPtrSet<MachineBasicBlock *, 4> IndirectTargetMBBs;
988 for (MachineBasicBlock &
MBB : MF) {
995 MachineInstr &TI = *MII;
1007 case X86::FARJMP16m:
1008 case X86::FARJMP32m:
1009 case X86::FARJMP64m:
1015 case X86::JMP16m_NT:
1017 case X86::JMP32m_NT:
1019 case X86::JMP64m_NT:
1025 "Support for 16-bit indirect branches is not implemented.");
1028 "Support for 32-bit indirect branches is not implemented.");
1037 return !OtherTI.isDebugInstr() && &OtherTI != &TI;
1040 dbgs() <<
"ERROR: Found other terminators in a block with an indirect "
1041 "branch! This is not yet supported! Terminator sequence:\n";
1051 TargetAddrSSA.AddAvailableValue(&
MBB, TargetReg);
1062 if (IndirectTargetMBBs.
empty())
1068 for (MachineBasicBlock &
MBB : MF) {
1070 if (!IndirectTargetMBBs.
count(&
MBB))
1077 "Unexpected EH pad as target of an indirect branch!");
1085 "Cannot check within a block that already has live-in EFLAGS!");
1092 if (IndirectTerminatedMBBs.
count(Pred))
1100 if (!
llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
1101 return Succ->isEHPad() || Succ == &MBB;
1104 dbgs() <<
"ERROR: Found conditional entry to target of indirect "
1110 "an indirect branch!");
1116 auto InsertPt = Pred->getFirstTerminator();
1117 Register TargetReg =
MRI->createVirtualRegister(&X86::GR64RegClass);
1122 TII->get(X86::MOV64ri32), TargetReg)
1142 TargetAddrSSA.AddAvailableValue(Pred, TargetReg);
1150 Register TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&
MBB);
1161 .
addReg(TargetReg, RegState::Kill)
1168 Register AddrReg =
MRI->createVirtualRegister(&X86::GR64RegClass);
1180 .
addReg(TargetReg, RegState::Kill)
1181 .
addReg(AddrReg, RegState::Kill);
1188 int PredStateSizeInBytes =
TRI->getRegSizeInBits(*PS->RC) / 8;
1190 Register UpdatedStateReg =
MRI->createVirtualRegister(PS->RC);
1204 PS->SSA.AddAvailableValue(&
MBB, UpdatedStateReg);
1215 MI.findRegisterDefOperand(X86::EFLAGS,
nullptr)) {
1216 return !DefOp->isDead();
1227 MI.findRegisterDefOperand(X86::EFLAGS,
nullptr)) {
1229 if (DefOp->isDead())
1237 if (
MI.killsRegister(X86::EFLAGS, &
TRI))
1243 return MBB.isLiveIn(X86::EFLAGS);
1273void X86SpeculativeLoadHardeningImpl::tracePredStateThroughBlocksAndHarden(
1274 MachineFunction &MF) {
1275 SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
1276 SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
1278 SmallSet<Register, 16> HardenedAddrRegs;
1280 SmallDenseMap<Register, Register, 32> AddrRegToHardenedReg;
1285 SparseBitVector<> LoadDepRegs;
1287 for (MachineBasicBlock &
MBB : MF) {
1304 for (MachineInstr &
MI :
MBB) {
1310 return Op.isReg() && LoadDepRegs.test(Op.getReg().id());
1312 for (MachineOperand &Def :
MI.defs())
1314 LoadDepRegs.
set(
Def.getReg().id());
1319 if (
MI.getOpcode() == X86::LFENCE)
1327 if (
MI.getOpcode() == X86::MFENCE)
1332 if (MemRefBeginIdx < 0) {
1334 <<
"WARNING: unable to harden loading instruction: ";
1339 MachineOperand &BaseMO =
1341 MachineOperand &IndexMO =
1347 if (!BaseMO.
isFI() && BaseMO.
getReg() != X86::RIP &&
1351 IndexReg = IndexMO.
getReg();
1353 if (!BaseReg && !IndexReg)
1361 if ((BaseReg && LoadDepRegs.
test(
BaseReg.id())) ||
1362 (IndexReg && LoadDepRegs.
test(IndexReg.
id())))
1372 MI.getOperand(0).isReg() &&
1373 canHardenRegister(
MI.getOperand(0).getReg()) &&
1374 !HardenedAddrRegs.
count(BaseReg) &&
1375 !HardenedAddrRegs.
count(IndexReg)) {
1377 HardenedAddrRegs.
insert(
MI.getOperand(0).getReg());
1385 HardenedAddrRegs.
insert(BaseReg);
1387 HardenedAddrRegs.
insert(IndexReg);
1389 for (MachineOperand &Def :
MI.defs())
1391 LoadDepRegs.
set(
Def.getReg().id());
1399 for (MachineInstr &
MI :
MBB) {
1403 "Requested to harden both the address and def of a load!");
1406 if (HardenLoadAddr.
erase(&
MI)) {
1408 assert(MemRefBeginIdx >= 0 &&
"Cannot have an invalid index here!");
1410 MachineOperand &BaseMO =
1412 MachineOperand &IndexMO =
1414 hardenLoadAddr(
MI, BaseMO, IndexMO, AddrRegToHardenedReg);
1420 if (HardenPostLoad.
erase(&
MI)) {
1421 assert(!
MI.isCall() &&
"Must not try to post-load harden a call!");
1429 MachineInstr *SunkMI = sinkPostLoadHardenedInst(
MI, HardenPostLoad);
1434 if (SunkMI != &
MI) {
1441 HardenPostLoad.
insert(SunkMI);
1449 AddrRegToHardenedReg[HardenedReg] = HardenedReg;
1460 hardenIndirectCallOrJumpInstr(
MI, AddrRegToHardenedReg);
1467 if (!
MI.isCall() && !
MI.isReturn())
1472 if (
MI.isReturn() && !
MI.isCall()) {
1473 hardenReturnInstr(
MI);
1480 assert(
MI.isCall() &&
"Should only reach here for calls!");
1481 tracePredStateThroughCall(
MI);
1484 HardenPostLoad.
clear();
1485 HardenLoadAddr.
clear();
1486 HardenedAddrRegs.
clear();
1487 AddrRegToHardenedReg.
clear();
1492 LoadDepRegs.
clear();
1502Register X86SpeculativeLoadHardeningImpl::saveEFLAGS(
1520void X86SpeculativeLoadHardeningImpl::restoreEFLAGS(
1531void X86SpeculativeLoadHardeningImpl::mergePredStateIntoSP(
1534 Register TmpReg =
MRI->createVirtualRegister(PS->RC);
1538 auto ShiftI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::SHL64ri), TmpReg)
1539 .
addReg(PredStateReg, RegState::Kill)
1543 auto OrI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::OR64rr), X86::RSP)
1545 .
addReg(TmpReg, RegState::Kill);
1551Register X86SpeculativeLoadHardeningImpl::extractPredStateFromSP(
1554 Register PredStateReg =
MRI->createVirtualRegister(PS->RC);
1555 Register TmpReg =
MRI->createVirtualRegister(PS->RC);
1560 BuildMI(
MBB, InsertPt, Loc,
TII->get(TargetOpcode::COPY), TmpReg)
1563 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::SAR64ri), PredStateReg)
1564 .
addReg(TmpReg, RegState::Kill)
1565 .
addImm(
TRI->getRegSizeInBits(*PS->RC) - 1);
1569 return PredStateReg;
1572void X86SpeculativeLoadHardeningImpl::hardenLoadAddr(
1573 MachineInstr &
MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
1574 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
1575 MachineBasicBlock &
MBB = *
MI.getParent();
1584 if (BaseMO.
isFI()) {
1588 dbgs() <<
" Skipping hardening base of explicit stack frame load: ";
1590 }
else if (BaseMO.
getReg() == X86::RSP) {
1595 "Explicit RSP access with dynamic index!");
1597 dbgs() <<
" Cannot harden base of explicit RSP offset in a load!");
1598 }
else if (BaseMO.
getReg() == X86::RIP ||
1599 BaseMO.
getReg() == X86::NoRegister) {
1609 dbgs() <<
" Cannot harden base of "
1610 << (BaseMO.
getReg() == X86::RIP ?
"RIP-relative" :
"no-base")
1611 <<
" address in a load!");
1614 "Only allowed to have a frame index or register base.");
1618 if (IndexMO.
getReg() != X86::NoRegister &&
1619 (HardenOpRegs.
empty() ||
1620 HardenOpRegs.
front()->getReg() != IndexMO.
getReg()))
1624 "Should have exactly one or two registers to harden!");
1626 HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
1627 "Should not have two of the same registers!");
1632 auto It = AddrRegToHardenedReg.
find(
Op->getReg());
1633 if (It == AddrRegToHardenedReg.
end())
1638 Op->setReg(It->second);
1642 if (HardenOpRegs.
empty())
1646 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&
MBB);
1648 auto InsertPt =
MI.getIterator();
1654 if (EFLAGSLive && !Subtarget->hasBMI2()) {
1656 FlagsReg = saveEFLAGS(
MBB, InsertPt, Loc);
1659 for (MachineOperand *
Op : HardenOpRegs) {
1661 auto *OpRC =
MRI->getRegClass(OpReg);
1662 Register TmpReg =
MRI->createVirtualRegister(OpRC);
1666 if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
1667 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
1668 assert(Subtarget->
hasAVX2() &&
"AVX2-specific register classes!");
1669 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
1674 Register VStateReg =
MRI->createVirtualRegister(&X86::VR128RegClass);
1676 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::VMOV64toPQIrr), VStateReg)
1683 Register VBStateReg =
MRI->createVirtualRegister(OpRC);
1684 auto BroadcastI =
BuildMI(
MBB, InsertPt, Loc,
1685 TII->get(Is128Bit ? X86::VPBROADCASTQrr
1686 : X86::VPBROADCASTQYrr),
1691 LLVM_DEBUG(
dbgs() <<
" Inserting broadcast: "; BroadcastI->dump();
1697 TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
1703 }
else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
1704 OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
1705 OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
1706 assert(Subtarget->
hasAVX512() &&
"AVX512-specific register classes!");
1707 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
1708 bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
1709 if (Is128Bit || Is256Bit)
1710 assert(Subtarget->hasVLX() &&
"AVX512VL-specific register classes!");
1713 Register VStateReg =
MRI->createVirtualRegister(OpRC);
1714 unsigned BroadcastOp =
Is128Bit ? X86::VPBROADCASTQrZ128rr
1715 : Is256Bit ? X86::VPBROADCASTQrZ256rr
1716 : X86::VPBROADCASTQrZrr;
1718 BuildMI(
MBB, InsertPt, Loc,
TII->get(BroadcastOp), VStateReg)
1722 LLVM_DEBUG(
dbgs() <<
" Inserting broadcast: "; BroadcastI->dump();
1726 unsigned OrOp =
Is128Bit ? X86::VPORQZ128rr
1727 : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
1728 auto OrI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(OrOp), TmpReg)
1736 assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
1737 "Not a supported register class for address hardening!");
1741 auto OrI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::OR64rr), TmpReg)
1751 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::SHRX64rr), TmpReg)
1763 "Should not have checked this register yet!");
1764 AddrRegToHardenedReg[
Op->getReg()] = TmpReg;
1766 ++NumAddrRegsHardened;
1771 restoreEFLAGS(
MBB, InsertPt, Loc, FlagsReg);
1774MachineInstr *X86SpeculativeLoadHardeningImpl::sinkPostLoadHardenedInst(
1775 MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
1777 "Cannot get here with a non-invariant load!");
1779 "Cannot get here with a data invariant load "
1780 "that interferes with EFLAGS!");
1783 auto SinkCheckToSingleUse =
1784 [&](MachineInstr &
MI) -> std::optional<MachineInstr *> {
1790 MachineInstr *SingleUseMI =
nullptr;
1791 for (MachineInstr &
UseMI :
MRI->use_instructions(DefReg)) {
1800 "Data variant instruction being hardened!");
1807 assert(MemRefBeginIdx >= 0 &&
1808 "Should always have mem references here!");
1810 MachineOperand &BaseMO =
1812 MachineOperand &IndexMO =
1814 if ((BaseMO.
isReg() && BaseMO.
getReg() == DefReg) ||
1835 if (
UseMI.getDesc().getNumDefs() > 1)
1842 if (!canHardenRegister(UseDefReg))
1845 SingleUseMI = &
UseMI;
1850 return {SingleUseMI};
1853 MachineInstr *
MI = &InitialMI;
1854 while (std::optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*
MI)) {
1864bool X86SpeculativeLoadHardeningImpl::canHardenRegister(
Register Reg) {
1869 auto *RC =
MRI->getRegClass(
Reg);
1870 int RegBytes =
TRI->getRegSizeInBits(*RC) / 8;
1875 unsigned RegIdx =
Log2_32(RegBytes);
1876 assert(RegIdx < 4 &&
"Unsupported register size");
1885 const TargetRegisterClass *NOREXRegClasses[] = {
1886 &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
1887 &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
1888 if (RC == NOREXRegClasses[RegIdx])
1891 const TargetRegisterClass *GPRRegClasses[] = {
1892 &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
1893 &X86::GR64RegClass};
1894 return RC->hasSuperClassEq(GPRRegClasses[RegIdx]);
1911Register X86SpeculativeLoadHardeningImpl::hardenValueInRegister(
1914 assert(canHardenRegister(
Reg) &&
"Cannot harden this register!");
1916 auto *RC =
MRI->getRegClass(
Reg);
1917 int Bytes =
TRI->getRegSizeInBits(*RC) / 8;
1918 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&
MBB);
1919 assert((Bytes == 1 || Bytes == 2 || Bytes == 4 || Bytes == 8) &&
1920 "Unknown register size");
1924 unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
1925 unsigned SubRegImm = SubRegImms[
Log2_32(Bytes)];
1926 Register NarrowStateReg =
MRI->createVirtualRegister(RC);
1927 BuildMI(
MBB, InsertPt, Loc,
TII->get(TargetOpcode::COPY), NarrowStateReg)
1928 .
addReg(StateReg, {}, SubRegImm);
1929 StateReg = NarrowStateReg;
1934 FlagsReg = saveEFLAGS(
MBB, InsertPt, Loc);
1937 unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
1938 unsigned OrOpCode = OrOpCodes[
Log2_32(Bytes)];
1939 auto OrI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(OrOpCode), NewReg)
1947 restoreEFLAGS(
MBB, InsertPt, Loc, FlagsReg);
1961Register X86SpeculativeLoadHardeningImpl::hardenPostLoad(MachineInstr &
MI) {
1962 MachineBasicBlock &
MBB = *
MI.getParent();
1965 auto &DefOp =
MI.getOperand(0);
1966 Register OldDefReg = DefOp.getReg();
1967 auto *DefRC =
MRI->getRegClass(OldDefReg);
1972 Register UnhardenedReg =
MRI->createVirtualRegister(DefRC);
1973 DefOp.setReg(UnhardenedReg);
1978 Register HardenedReg = hardenValueInRegister(
1979 UnhardenedReg,
MBB, std::next(
MI.getIterator()), Loc);
1983 MRI->replaceRegWith( OldDefReg, HardenedReg);
1985 ++NumPostLoadRegsHardened;
2012void X86SpeculativeLoadHardeningImpl::hardenReturnInstr(MachineInstr &
MI) {
2013 MachineBasicBlock &
MBB = *
MI.getParent();
2015 auto InsertPt =
MI.getIterator();
2025 mergePredStateIntoSP(
MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&
MBB));
2058void X86SpeculativeLoadHardeningImpl::tracePredStateThroughCall(
2060 MachineBasicBlock &
MBB = *
MI.getParent();
2062 auto InsertPt =
MI.getIterator();
2075 BuildMI(
MBB, std::next(InsertPt), Loc,
TII->get(X86::LFENCE));
2077 ++NumLFENCEsInserted;
2083 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&
MBB);
2084 mergePredStateIntoSP(
MBB, InsertPt, Loc, StateReg);
2099 MI.setPostInstrSymbol(MF, RetSymbol);
2101 const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
2127 ExpectedRetAddrReg =
MRI->createVirtualRegister(AddrRC);
2130 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
2133 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::LEA64r), ExpectedRetAddrReg)
2149 if (!ExpectedRetAddrReg) {
2150 ExpectedRetAddrReg =
MRI->createVirtualRegister(AddrRC);
2151 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::MOV64rm), ExpectedRetAddrReg)
2161 Register NewStateReg = extractPredStateFromSP(
MBB, InsertPt, Loc);
2171 .
addReg(ExpectedRetAddrReg, RegState::Kill)
2174 Register ActualRetAddrReg =
MRI->createVirtualRegister(AddrRC);
2175 BuildMI(
MBB, InsertPt, Loc,
TII->get(X86::LEA64r), ActualRetAddrReg)
2182 .
addReg(ExpectedRetAddrReg, RegState::Kill)
2183 .
addReg(ActualRetAddrReg, RegState::Kill);
2188 int PredStateSizeInBytes =
TRI->getRegSizeInBits(*PS->RC) / 8;
2191 Register UpdatedStateReg =
MRI->createVirtualRegister(PS->RC);
2192 auto CMovI =
BuildMI(
MBB, InsertPt, Loc,
TII->get(CMovOp), UpdatedStateReg)
2193 .
addReg(NewStateReg, RegState::Kill)
2200 PS->SSA.AddAvailableValue(&
MBB, UpdatedStateReg);
2218void X86SpeculativeLoadHardeningImpl::hardenIndirectCallOrJumpInstr(
2220 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
2221 switch (
MI.getOpcode()) {
2222 case X86::FARCALL16m:
2223 case X86::FARCALL32m:
2224 case X86::FARCALL64m:
2225 case X86::FARJMP16m:
2226 case X86::FARJMP32m:
2227 case X86::FARJMP64m:
2238 assert(!
MI.mayLoad() &&
"Found a lingering loading instruction!");
2242 if (!
MI.getOperand(0).isReg())
2247 auto &TargetOp =
MI.getOperand(0);
2248 Register OldTargetReg = TargetOp.getReg();
2253 Register &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
2262 if (!HardenedTargetReg)
2263 HardenedTargetReg = hardenValueInRegister(
2264 OldTargetReg, *
MI.getParent(),
MI.getIterator(),
MI.getDebugLoc());
2267 TargetOp.setReg(HardenedTargetReg);
2269 ++NumCallsOrJumpsHardened;
2275 X86SpeculativeLoadHardeningImpl Impl;
2276 const bool Changed = Impl.run(MF);
2285 "X86 speculative load hardener",
false,
false)
2290 return new X86SpeculativeLoadHardeningLegacy();
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > HardenLoads("aarch64-slh-loads", cl::Hidden, cl::desc("Sanitize loads from memory."), cl::init(true))
This file defines the DenseMap class.
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseBitVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static MachineBasicBlock & splitEdge(MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount, MachineInstr *Br, MachineInstr *&UncondBr, const X86InstrInfo &TII)
static cl::opt< bool > HardenLoads(PASS_KEY "-loads", cl::desc("Sanitize loads from memory. When disable, no " "significant security is provided."), cl::init(true), cl::Hidden)
static void canonicalizePHIOperands(MachineFunction &MF)
Removing duplicate PHI operands to leave the PHI in a canonical and predictable form.
static cl::opt< bool > HardenInterprocedurally(PASS_KEY "-ip", cl::desc("Harden interprocedurally by passing our state in and out of " "functions in the high bits of the stack pointer."), cl::init(true), cl::Hidden)
static cl::opt< bool > FenceCallAndRet(PASS_KEY "-fence-call-and-ret", cl::desc("Use a full speculation fence to harden both call and ret edges " "rather than a lighter weight mitigation."), cl::init(false), cl::Hidden)
static cl::opt< bool > EnablePostLoadHardening(PASS_KEY "-post-load", cl::desc("Harden the value loaded *after* it is loaded by " "flushing the loaded bits to 1. This is hard to do " "in general but can be done easily for GPRs."), cl::init(true), cl::Hidden)
static cl::opt< bool > HardenEdgesWithLFENCE(PASS_KEY "-lfence", cl::desc("Use LFENCE along each conditional edge to harden against speculative " "loads rather than conditional movs and poisoned pointers."), cl::init(false), cl::Hidden)
static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const TargetRegisterInfo &TRI)
static cl::opt< bool > EnableSpeculativeLoadHardening("x86-speculative-load-hardening", cl::desc("Force enable speculative load hardening"), cl::init(false), cl::Hidden)
static const TargetRegisterClass * getRegClassForUnfoldedLoad(const X86InstrInfo &TII, unsigned Opcode)
Compute the register class for the unfolded load.
static bool hasVulnerableLoad(MachineFunction &MF)
Helper to scan a function for loads vulnerable to misspeculation that we want to harden.
static bool isEFLAGSDefLive(const MachineInstr &MI)
static cl::opt< bool > HardenIndirectCallsAndJumps(PASS_KEY "-indirect", cl::desc("Harden indirect calls and jumps against using speculatively " "stored attacker controlled addresses. This is designed to " "mitigate Spectre v1.2 style attacks."), cl::init(true), cl::Hidden)
Represents analyses that only rely on functions' control flow.
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
FunctionPass class - This class is used to implement most global optimizations.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Describe properties that are true of each instruction in the target description file.
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
reverse_instr_iterator instr_rbegin()
LLVM_ABI iterator SkipPHIsAndLabels(iterator I)
Return the first instruction in MBB after I that is not a PHI or a label.
LLVM_ABI iterator SkipPHIsLabelsAndDebug(iterator I, Register Reg=Register(), bool SkipPseudoOp=true)
Return the first instruction in MBB after I that is not a PHI, label or debug.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
unsigned succ_size() const
LLVM_ABI void dump() const
bool isEHScopeEntry() const
Returns true if this is the entry block of an EH scope, i.e., the block that used to have a catchpad ...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
reverse_instr_iterator instr_rend()
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< iterator > terminators()
iterator_range< succ_iterator > successors()
iterator_range< pred_iterator > predecessors()
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isCleanupFuncletEntry() const
Returns true if this is the entry block of a cleanup funclet.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction is part of the terminator for a basic block.
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
MachineOperand * findRegisterUseOperand(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false)
Wrapper for findRegisterUseOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void setIsKill(bool Val=true)
void setMBB(MachineBasicBlock *MBB)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static MachineOperand CreateMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool test(unsigned Idx) const
StringRef - Represent a constant reference to a string, i.e.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool has128ByteRedZone(const MachineFunction &MF) const
Return true if the function has a redzone (accessible bytes past the frame of the top of stack functi...
static bool isDataInvariantLoad(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value l...
static bool isDataInvariant(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value o...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
const X86InstrInfo * getInstrInfo() const override
bool isPositionIndependent() const
const X86RegisterInfo * getRegisterInfo() const override
const X86FrameLowering * getFrameLowering() const override
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
CondCode getCondFromBranch(const MachineInstr &MI)
int getFirstAddrOperandIdx(const MachineInstr &MI)
Return the index of the instruction's first address operand, if it has a memory reference,...
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand=false, bool HasNDD=false)
Return a cmov opcode for the given register size in bytes, and operand type.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
NodeAddr< DefNode * > Def
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
auto unique(Range &&R, Predicate P)
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
FunctionPass * createX86SpeculativeLoadHardeningLegacyPass()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...