29#define DEBUG_TYPE "x86-pseudo"
30#define X86_EXPAND_PSEUDO_NAME "X86 pseudo instruction expansion pass"
55 MachineFunctionProperties::Property::NoVRegs);
59 return "X86 pseudo instruction expansion pass";
78 void expandVastartSaveXmmRegs(
82char X86ExpandPseudo::ID = 0;
89void X86ExpandPseudo::expandICallBranchFunnel(
98 std::vector<std::pair<MachineBasicBlock *, unsigned>> TargetMBBs;
103 auto CmpTarget = [&](
unsigned Target) {
104 if (Selector.
isReg())
118 auto CreateMBB = [&]() {
129 auto *ElseMBB = CreateMBB();
130 MF->
insert(InsPt, ElseMBB);
135 auto EmitCondJumpTarget = [&](
unsigned CC,
unsigned Target) {
136 auto *ThenMBB = CreateMBB();
137 TargetMBBs.push_back({ThenMBB,
Target});
138 EmitCondJump(
CC, ThenMBB);
141 auto EmitTailCall = [&](
unsigned Target) {
146 std::function<void(
unsigned,
unsigned)> EmitBranchFunnel =
148 if (NumTargets == 1) {
153 if (NumTargets == 2) {
160 if (NumTargets < 6) {
168 auto *ThenMBB = CreateMBB();
172 EmitBranchFunnel(
FirstTarget + (NumTargets / 2) + 1,
173 NumTargets - (NumTargets / 2) - 1);
175 MF->
insert(InsPt, ThenMBB);
182 for (
auto P : TargetMBBs) {
187 JTMBB->
erase(JTInst);
197 assert((
MI.getOperand(1).isGlobal() ||
MI.getOperand(1).isReg()) &&
198 "invalid operand for regular call");
200 if (
MI.getOpcode() == X86::CALL64m_RVMARKER)
202 else if (
MI.getOpcode() == X86::CALL64r_RVMARKER)
204 else if (
MI.getOpcode() == X86::CALL64pcrel32_RVMARKER)
205 Opc = X86::CALL64pcrel32;
210 bool RAXImplicitDead =
false;
214 if (
Op.isReg() &&
Op.isImplicit() &&
Op.isDead() &&
215 TRI->regsOverlap(
Op.getReg(), X86::RAX)) {
218 RAXImplicitDead =
true;
228 auto TargetReg = STI->getTargetTriple().isOSWindows() ? X86::RCX : X86::RDI;
233 if (
MI.shouldUpdateCallSiteInfo())
248 MI.eraseFromParent();
253 if (
TM.getTargetTriple().isOSDarwin())
264 unsigned Opcode =
MI.getOpcode();
266#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)
270 case X86::TCRETURNdi:
271 case X86::TCRETURNdicc:
272 case X86::TCRETURNri:
273 case X86::TCRETURNmi:
274 case X86::TCRETURNdi64:
275 case X86::TCRETURNdi64cc:
276 case X86::TCRETURNri64:
277 case X86::TCRETURNmi64: {
278 bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
282 assert(StackAdjust.
isImm() &&
"Expecting immediate value.");
285 int StackAdj = StackAdjust.
getImm();
286 int MaxTCDelta = X86FI->getTCReturnAddrDelta();
288 assert(MaxTCDelta <= 0 &&
"MaxTCDelta should never be positive");
291 Offset = StackAdj - MaxTCDelta;
292 assert(
Offset >= 0 &&
"Offset should never be negative");
294 if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {
295 assert(
Offset == 0 &&
"Conditional tail call cannot adjust the stack.");
305 bool IsWin64 = STI->isTargetWin64();
306 if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||
307 Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {
310 case X86::TCRETURNdi:
313 case X86::TCRETURNdicc:
314 Op = X86::TAILJMPd_CC;
316 case X86::TCRETURNdi64cc:
318 "Conditional tail calls confuse "
319 "the Win64 unwinder.");
320 Op = X86::TAILJMPd64_CC;
325 Op = X86::TAILJMPd64;
337 if (
Op == X86::TAILJMPd_CC ||
Op == X86::TAILJMPd64_CC) {
341 }
else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
342 unsigned Op = (Opcode == X86::TCRETURNmi)
344 : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
348 }
else if (Opcode == X86::TCRETURNri64) {
351 TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
364 if (
MBBI->isCandidateForCallSiteEntry())
373 case X86::EH_RETURN64: {
375 assert(DestAddr.
isReg() &&
"Offset should be in register!");
376 const bool Uses64BitFramePtr =
377 STI->isTarget64BitLP64() || STI->isTargetNaCl64();
380 TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
387 int64_t StackAdj =
MBBI->getOperand(0).getImm();
388 X86FL->emitSPUpdate(
MBB,
MBBI,
DL, StackAdj,
true);
390 unsigned RetOp = STI->is64Bit() ? X86::IRET64 : X86::IRET32;
392 if (STI->is64Bit() && STI->hasUINTR() &&
401 int64_t StackAdj =
MBBI->getOperand(0).getImm();
405 TII->get(STI->is64Bit() ? X86::RET64 : X86::RET32));
406 }
else if (isUInt<16>(StackAdj)) {
408 TII->get(STI->is64Bit() ? X86::RETI64 : X86::RETI32))
412 "shouldn't need to do this for x86_64 targets!");
416 X86FL->emitSPUpdate(
MBB,
MBBI,
DL, StackAdj,
true);
420 for (
unsigned I = 1, E =
MBBI->getNumOperands();
I != E; ++
I)
425 case X86::LCMPXCHG16B_SAVE_RBX: {
461 case X86::MASKPAIR16LOAD: {
463 assert(Disp >= 0 && Disp <= INT32_MAX - 2 &&
"Unexpected displacement");
465 bool DstIsDead =
MBBI->getOperand(0).isDead();
466 Register Reg0 =
TRI->getSubReg(Reg, X86::sub_mask_0);
467 Register Reg1 =
TRI->getSubReg(Reg, X86::sub_mask_1);
477 MIBLo.
add(
MBBI->getOperand(1 + i));
481 MIBHi.
add(
MBBI->getOperand(1 + i));
490 MIBLo.setMemRefs(MMOLo);
491 MIBHi.setMemRefs(MMOHi);
497 case X86::MASKPAIR16STORE: {
499 assert(Disp >= 0 && Disp <= INT32_MAX - 2 &&
"Unexpected displacement");
502 Register Reg0 =
TRI->getSubReg(Reg, X86::sub_mask_0);
503 Register Reg1 =
TRI->getSubReg(Reg, X86::sub_mask_1);
511 MIBLo.add(
MBBI->getOperand(i));
513 MIBHi.addImm(Disp + 2);
515 MIBHi.add(
MBBI->getOperand(i));
526 MIBLo.setMemRefs(MMOLo);
527 MIBHi.setMemRefs(MMOHi);
533 case X86::MWAITX_SAVE_RBX: {
553 case TargetOpcode::ICALL_BRANCH_FUNNEL:
554 expandICallBranchFunnel(&
MBB,
MBBI);
556 case X86::PLDTILECFGV: {
560 case X86::PTILELOADDV:
561 case X86::PTILELOADDT1V:
562 case X86::PTILELOADDRSV:
563 case X86::PTILELOADDRST1V:
564 case X86::PTCVTROWD2PSrreV:
565 case X86::PTCVTROWD2PSrriV:
566 case X86::PTCVTROWPS2PBF16HrreV:
567 case X86::PTCVTROWPS2PBF16HrriV:
568 case X86::PTCVTROWPS2PBF16LrreV:
569 case X86::PTCVTROWPS2PBF16LrriV:
570 case X86::PTCVTROWPS2PHHrreV:
571 case X86::PTCVTROWPS2PHHrriV:
572 case X86::PTCVTROWPS2PHLrreV:
573 case X86::PTCVTROWPS2PHLrriV:
574 case X86::PTILEMOVROWrreV:
575 case X86::PTILEMOVROWrriV: {
576 for (
unsigned i = 2; i > 0; --i)
580 case X86::PTILELOADDRSV:
581 Opc = X86::TILELOADDRS;
583 case X86::PTILELOADDRST1V:
584 Opc = X86::TILELOADDRST1;
586 case X86::PTILELOADDV:
589 case X86::PTILELOADDT1V:
592 case X86::PTCVTROWD2PSrreV:
593 Opc = X86::TCVTROWD2PSrre;
595 case X86::PTCVTROWD2PSrriV:
596 Opc = X86::TCVTROWD2PSrri;
598 case X86::PTCVTROWPS2PBF16HrreV:
599 Opc = X86::TCVTROWPS2PBF16Hrre;
601 case X86::PTCVTROWPS2PBF16HrriV:
602 Opc = X86::TCVTROWPS2PBF16Hrri;
604 case X86::PTCVTROWPS2PBF16LrreV:
605 Opc = X86::TCVTROWPS2PBF16Lrre;
607 case X86::PTCVTROWPS2PBF16LrriV:
608 Opc = X86::TCVTROWPS2PBF16Lrri;
610 case X86::PTCVTROWPS2PHHrreV:
611 Opc = X86::TCVTROWPS2PHHrre;
613 case X86::PTCVTROWPS2PHHrriV:
614 Opc = X86::TCVTROWPS2PHHrri;
616 case X86::PTCVTROWPS2PHLrreV:
617 Opc = X86::TCVTROWPS2PHLrre;
619 case X86::PTCVTROWPS2PHLrriV:
620 Opc = X86::TCVTROWPS2PHLrri;
622 case X86::PTILEMOVROWrreV:
623 Opc = X86::TILEMOVROWrre;
625 case X86::PTILEMOVROWrriV:
626 Opc = X86::TILEMOVROWrri;
631 MI.setDesc(
TII->get(Opc));
639 case X86::PTILEPAIRLOAD: {
642 bool DstIsDead =
MBBI->getOperand(0).isDead();
643 Register TReg0 =
TRI->getSubReg(TReg, X86::sub_t0);
644 Register TReg1 =
TRI->getSubReg(TReg, X86::sub_t1);
645 unsigned TmmSize =
TRI->getRegSizeInBits(X86::TILERegClass) / 8;
655 MIBLo.
add(
MBBI->getOperand(1 + i));
657 MIBHi.
addImm(Disp + TmmSize);
659 MIBHi.
add(
MBBI->getOperand(1 + i));
686 case X86::PTILEPAIRSTORE: {
690 Register TReg0 =
TRI->getSubReg(TReg, X86::sub_t0);
691 Register TReg1 =
TRI->getSubReg(TReg, X86::sub_t1);
692 unsigned TmmSize =
TRI->getRegSizeInBits(X86::TILERegClass) / 8;
700 MIBLo.
add(
MBBI->getOperand(i));
702 MIBHi.
addImm(Disp + TmmSize);
704 MIBHi.
add(
MBBI->getOperand(i));
727 case X86::PT2RPNTLVWZ0V:
728 case X86::PT2RPNTLVWZ0T1V:
729 case X86::PT2RPNTLVWZ1V:
730 case X86::PT2RPNTLVWZ1T1V:
731 case X86::PT2RPNTLVWZ0RSV:
732 case X86::PT2RPNTLVWZ0RST1V:
733 case X86::PT2RPNTLVWZ1RSV:
734 case X86::PT2RPNTLVWZ1RST1V: {
735 for (
unsigned i = 3; i > 0; --i)
739 case X86::PT2RPNTLVWZ0V:
740 Opc = X86::T2RPNTLVWZ0;
742 case X86::PT2RPNTLVWZ0T1V:
743 Opc = X86::T2RPNTLVWZ0T1;
745 case X86::PT2RPNTLVWZ1V:
746 Opc = X86::T2RPNTLVWZ1;
748 case X86::PT2RPNTLVWZ1T1V:
749 Opc = X86::T2RPNTLVWZ1T1;
751 case X86::PT2RPNTLVWZ0RSV:
752 Opc = X86::T2RPNTLVWZ0RS;
754 case X86::PT2RPNTLVWZ0RST1V:
755 Opc = X86::T2RPNTLVWZ0RST1;
757 case X86::PT2RPNTLVWZ1RSV:
758 Opc = X86::T2RPNTLVWZ1RS;
760 case X86::PT2RPNTLVWZ1RST1V:
761 Opc = X86::T2RPNTLVWZ1RST1;
766 MI.setDesc(
TII->get(Opc));
769 case X86::PTTRANSPOSEDV:
770 case X86::PTCONJTFP16V: {
771 for (
int i = 2; i > 0; --i)
773 MI.setDesc(
TII->get(Opcode == X86::PTTRANSPOSEDV ? X86::TTRANSPOSED
777 case X86::PTCMMIMFP16PSV:
778 case X86::PTCMMRLFP16PSV:
783 case X86::PTDPBF16PSV:
784 case X86::PTDPFP16PSV:
785 case X86::PTTDPBF16PSV:
786 case X86::PTTDPFP16PSV:
787 case X86::PTTCMMIMFP16PSV:
788 case X86::PTTCMMRLFP16PSV:
789 case X86::PTCONJTCMMIMFP16PSV:
790 case X86::PTMMULTF32PSV:
791 case X86::PTTMMULTF32PSV:
792 case X86::PTDPBF8PSV:
793 case X86::PTDPBHF8PSV:
794 case X86::PTDPHBF8PSV:
795 case X86::PTDPHF8PSV: {
796 MI.untieRegOperand(4);
797 for (
unsigned i = 3; i > 0; --i)
801 case X86::PTCMMIMFP16PSV: Opc = X86::TCMMIMFP16PS;
break;
802 case X86::PTCMMRLFP16PSV: Opc = X86::TCMMRLFP16PS;
break;
803 case X86::PTDPBSSDV: Opc = X86::TDPBSSD;
break;
804 case X86::PTDPBSUDV: Opc = X86::TDPBSUD;
break;
805 case X86::PTDPBUSDV: Opc = X86::TDPBUSD;
break;
806 case X86::PTDPBUUDV: Opc = X86::TDPBUUD;
break;
807 case X86::PTDPBF16PSV: Opc = X86::TDPBF16PS;
break;
808 case X86::PTDPFP16PSV: Opc = X86::TDPFP16PS;
break;
809 case X86::PTTDPBF16PSV:
810 Opc = X86::TTDPBF16PS;
812 case X86::PTTDPFP16PSV:
813 Opc = X86::TTDPFP16PS;
815 case X86::PTTCMMIMFP16PSV:
816 Opc = X86::TTCMMIMFP16PS;
818 case X86::PTTCMMRLFP16PSV:
819 Opc = X86::TTCMMRLFP16PS;
821 case X86::PTCONJTCMMIMFP16PSV:
822 Opc = X86::TCONJTCMMIMFP16PS;
824 case X86::PTMMULTF32PSV:
825 Opc = X86::TMMULTF32PS;
827 case X86::PTTMMULTF32PSV:
828 Opc = X86::TTMMULTF32PS;
830 case X86::PTDPBF8PSV:
833 case X86::PTDPBHF8PSV:
834 Opc = X86::TDPBHF8PS;
836 case X86::PTDPHBF8PSV:
837 Opc = X86::TDPHBF8PS;
839 case X86::PTDPHF8PSV:
846 MI.setDesc(
TII->get(Opc));
847 MI.tieOperands(0, 1);
850 case X86::PTILESTOREDV: {
851 for (
int i = 1; i >= 0; --i)
856#undef GET_EGPR_IF_ENABLED
857 case X86::PTILEZEROV: {
858 for (
int i = 2; i > 0; --i)
860 MI.setDesc(
TII->get(X86::TILEZERO));
863 case X86::CALL64pcrel32_RVMARKER:
864 case X86::CALL64r_RVMARKER:
865 case X86::CALL64m_RVMARKER:
866 expandCALL_RVMARKER(
MBB,
MBBI);
868 case X86::ADD32mi_ND:
869 case X86::ADD64mi32_ND:
870 case X86::SUB32mi_ND:
871 case X86::SUB64mi32_ND:
872 case X86::AND32mi_ND:
873 case X86::AND64mi32_ND:
875 case X86::OR64mi32_ND:
876 case X86::XOR32mi_ND:
877 case X86::XOR64mi32_ND:
878 case X86::ADC32mi_ND:
879 case X86::ADC64mi32_ND:
880 case X86::SBB32mi_ND:
881 case X86::SBB64mi32_ND: {
899 MI.getOperand(
MI.getNumExplicitOperands() - 1);
914 if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(
Base) ||
915 X86MCRegisterClasses[X86::GR32RegClassID].
contains(Index))
919 unsigned Opc, LoadOpc;
921#define MI_TO_RI(OP) \
922 case X86::OP##32mi_ND: \
923 Opc = X86::OP##32ri; \
924 LoadOpc = X86::MOV32rm; \
926 case X86::OP##64mi32_ND: \
927 Opc = X86::OP##64ri32; \
928 LoadOpc = X86::MOV64rm; \
948 for (
unsigned I =
MI.getNumImplicitOperands() + 1;
I != 0; --
I)
949 MI.removeOperand(
MI.getNumOperands() - 1);
950 MI.setDesc(
TII->get(LoadOpc));
971void X86ExpandPseudo::expandVastartSaveXmmRegs(
974 assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS);
978 const DebugLoc &
DL = VAStartPseudoInstr->getDebugLoc();
979 Register CountReg = VAStartPseudoInstr->getOperand(0).getReg();
985 LiveRegs.addLiveIns(*EntryBlk);
987 if (
MI.getOpcode() == VAStartPseudoInstr->getOpcode())
990 LiveRegs.stepForward(
MI, Clobbers);
1000 Func->insert(EntryBlkIter, GuardedRegsBlk);
1001 Func->insert(EntryBlkIter, TailBlk);
1009 uint64_t FrameOffset = VAStartPseudoInstr->getOperand(4).getImm();
1010 uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(6).getImm();
1013 unsigned MOVOpc = STI->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
1016 for (int64_t OpndIdx = 7, RegIdx = 0;
1017 OpndIdx < VAStartPseudoInstr->getNumOperands() - 1;
1018 OpndIdx++, RegIdx++) {
1019 auto NewMI =
BuildMI(GuardedRegsBlk,
DL,
TII->get(MOVOpc));
1022 NewMI.addImm(FrameOffset + VarArgsRegsOffset + RegIdx * 16);
1024 NewMI.add(VAStartPseudoInstr->getOperand(i + 1));
1026 NewMI.addReg(VAStartPseudoInstr->getOperand(OpndIdx).getReg());
1027 assert(VAStartPseudoInstr->getOperand(OpndIdx).getReg().isPhysical());
1035 if (!STI->isCallingConvWin64(
Func->getFunction().getCallingConv())) {
1051 VAStartPseudoInstr->eraseFromParent();
1070bool X86ExpandPseudo::expandPseudosWhichAffectControlFlow(
MachineFunction &MF) {
1075 if (
Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) {
1076 expandVastartSaveXmmRegs(&(MF.
front()), Instr);
1086 TII = STI->getInstrInfo();
1087 TRI = STI->getRegisterInfo();
1089 X86FL = STI->getFrameLowering();
1091 bool Modified = expandPseudosWhichAffectControlFlow(MF);
1100 return new X86ExpandPseudo();
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
unsigned const TargetRegisterInfo * TRI
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static Target * FirstTarget
#define GET_EGPR_IF_ENABLED(OPC)
#define X86_EXPAND_PSEUDO_NAME
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
void setPreservesCFG()
This function should be called by the pass, iff they do not:
LLVM Basic Block Representation.
This class represents an Operation in the Expression.
FunctionPass class - This class is used to implement most global optimizations.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
Emit instructions to copy a pair of physical registers.
A set of physical registers with utility functions to track liveness when walking backward/forward through a basic block.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and updates PHI operands in the successor blocks to refer to this block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
void moveCallSiteInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to New.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
unsigned getNumOperands() const
Retuns the total number of operands.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
void setIsKill(bool Val=true)
unsigned getTargetFlags() const
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
const char * getSymbolName() const
Register getReg() const
getReg - Returns the register number.
int64_t getOffset() const
Return the offset from the symbol in this operand.
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Wrapper class representing virtual and physical registers.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
CodeModel::Model getCodeModel() const
Returns the code model.
Target - Wrapper for Target specific information.
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
Reg
All possible values of the reg field in the ModR/M byte.
bool needSIB(MCRegister BaseReg, MCRegister IndexReg, bool In64BitMode)
int getFirstAddrOperandIdx(const MachineInstr &MI)
Return the index of the instruction's first address operand, if it has a memory reference,...
NodeAddr< InstrNode * > Instr
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
static bool isMem(const MachineInstr &MI, unsigned Op)
char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
unsigned getDeadRegState(bool B)
char & MachineLoopInfoID
MachineLoopInfo - This pass is a loop analysis pass.
FunctionPass * createX86ExpandPseudoPass()
Return a Machine IR pass that expands X86-specific pseudo instructions into a sequence of actual inst...
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
void addLiveIns(MachineBasicBlock &MBB, const LivePhysRegs &LiveRegs)
Adds registers contained in LiveRegs to the block live-in list of MBB.