39#include "llvm/IR/IntrinsicsX86.h"
50#define DEBUG_TYPE "X86-isel"
56#define GET_GLOBALISEL_PREDICATE_BITSET
57#include "X86GenGlobalISel.inc"
58#undef GET_GLOBALISEL_PREDICATE_BITSET
74 unsigned getLoadStoreOp(
const LLT &Ty,
const RegisterBank &RB,
unsigned Opc,
75 Align Alignment)
const;
110 const unsigned DstReg,
112 const unsigned SrcReg,
123 bool emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &
I,
126 bool emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &
I,
139#define GET_GLOBALISEL_PREDICATES_DECL
140#include "X86GenGlobalISel.inc"
141#undef GET_GLOBALISEL_PREDICATES_DECL
143#define GET_GLOBALISEL_TEMPORARIES_DECL
144#include "X86GenGlobalISel.inc"
145#undef GET_GLOBALISEL_TEMPORARIES_DECL
150#define GET_GLOBALISEL_IMPL
151#include "X86GenGlobalISel.inc"
152#undef GET_GLOBALISEL_IMPL
157 :
TM(
TM), STI(STI),
TII(*STI.getInstrInfo()),
TRI(*STI.getRegisterInfo()),
160#include
"X86GenGlobalISel.inc"
163#include
"X86GenGlobalISel.inc"
171X86InstructionSelector::getRegClass(
LLT Ty,
const RegisterBank &RB)
const {
172 if (RB.
getID() == X86::GPRRegBankID) {
174 return &X86::GR8RegClass;
176 return &X86::GR16RegClass;
178 return &X86::GR32RegClass;
180 return &X86::GR64RegClass;
182 if (RB.
getID() == X86::VECRRegBankID) {
184 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
186 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
188 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
190 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
192 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
194 return &X86::VR512RegClass;
201X86InstructionSelector::getRegClass(
LLT Ty,
unsigned Reg,
208 unsigned SubIdx = X86::NoSubRegister;
209 if (RC == &X86::GR32RegClass) {
210 SubIdx = X86::sub_32bit;
211 }
else if (RC == &X86::GR16RegClass) {
212 SubIdx = X86::sub_16bit;
213 }
else if (RC == &X86::GR8RegClass) {
214 SubIdx = X86::sub_8bit;
222 if (X86::GR64RegClass.
contains(Reg))
223 return &X86::GR64RegClass;
224 if (X86::GR32RegClass.
contains(Reg))
225 return &X86::GR32RegClass;
226 if (X86::GR16RegClass.
contains(Reg))
227 return &X86::GR16RegClass;
229 return &X86::GR8RegClass;
237bool X86InstructionSelector::selectDebugInstr(
MachineInstr &
I,
245 if (
Reg.isPhysical())
247 LLT Ty =
MRI.getType(Reg);
250 dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
252 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
256 dbgs() <<
"Warning: DBG_VALUE operand has unexpected size/bank\n");
260 RBI.constrainGenericRegister(Reg, *RC,
MRI);
269 Register DstReg =
I.getOperand(0).getReg();
270 const unsigned DstSize = RBI.getSizeInBits(DstReg,
MRI,
TRI);
273 Register SrcReg =
I.getOperand(1).getReg();
274 const unsigned SrcSize = RBI.getSizeInBits(SrcReg,
MRI,
TRI);
278 assert(
I.isCopy() &&
"Generic operators do not allow physical registers");
280 if (DstSize > SrcSize && SrcRegBank.
getID() == X86::GPRRegBankID &&
281 DstRegBank.
getID() == X86::GPRRegBankID) {
287 if (SrcRC != DstRC) {
289 Register ExtSrc =
MRI.createVirtualRegister(DstRC);
291 TII.get(TargetOpcode::SUBREG_TO_REG))
295 .
addImm(getSubRegIndex(SrcRC));
297 I.getOperand(1).setReg(ExtSrc);
305 "No phys reg on generic operators");
306 assert((DstSize == SrcSize ||
310 DstSize <= RBI.getSizeInBits(SrcReg,
MRI,
TRI))) &&
311 "Copy with different width?!");
316 if (SrcRegBank.
getID() == X86::GPRRegBankID &&
317 DstRegBank.
getID() == X86::GPRRegBankID && SrcSize > DstSize &&
323 if (DstRC != SrcRC) {
324 I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
325 I.getOperand(1).substPhysReg(SrcReg,
TRI);
334 if (!RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
340 I.setDesc(
TII.get(X86::COPY));
345 assert(
I.getParent() &&
"Instruction should be in a basic block!");
346 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
352 unsigned Opcode =
I.getOpcode();
356 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
362 if (
I.isDebugInstr())
368 assert(
I.getNumOperands() ==
I.getNumExplicitOperands() &&
369 "Generic instruction has unexpected implicit operands\n");
371 if (selectImpl(
I, *CoverageInfo))
377 switch (
I.getOpcode()) {
380 case TargetOpcode::G_STORE:
381 case TargetOpcode::G_LOAD:
383 case TargetOpcode::G_PTR_ADD:
384 case TargetOpcode::G_FRAME_INDEX:
385 return selectFrameIndexOrGep(
I,
MRI, MF);
386 case TargetOpcode::G_GLOBAL_VALUE:
387 return selectGlobalValue(
I,
MRI, MF);
388 case TargetOpcode::G_CONSTANT:
389 return selectConstant(
I,
MRI, MF);
390 case TargetOpcode::G_FCONSTANT:
391 return materializeFP(
I,
MRI, MF);
392 case TargetOpcode::G_PTRTOINT:
393 case TargetOpcode::G_TRUNC:
394 return selectTruncOrPtrToInt(
I,
MRI, MF);
395 case TargetOpcode::G_INTTOPTR:
397 case TargetOpcode::G_ZEXT:
398 return selectZext(
I,
MRI, MF);
399 case TargetOpcode::G_ANYEXT:
400 return selectAnyext(
I,
MRI, MF);
401 case TargetOpcode::G_ICMP:
402 return selectCmp(
I,
MRI, MF);
403 case TargetOpcode::G_FCMP:
404 return selectFCmp(
I,
MRI, MF);
405 case TargetOpcode::G_UADDE:
406 case TargetOpcode::G_UADDO:
407 case TargetOpcode::G_USUBE:
408 case TargetOpcode::G_USUBO:
409 return selectUAddSub(
I,
MRI, MF);
410 case TargetOpcode::G_UNMERGE_VALUES:
412 case TargetOpcode::G_MERGE_VALUES:
413 case TargetOpcode::G_CONCAT_VECTORS:
415 case TargetOpcode::G_EXTRACT:
416 return selectExtract(
I,
MRI, MF);
417 case TargetOpcode::G_INSERT:
418 return selectInsert(
I,
MRI, MF);
419 case TargetOpcode::G_BRCOND:
420 return selectCondBranch(
I,
MRI, MF);
421 case TargetOpcode::G_IMPLICIT_DEF:
422 case TargetOpcode::G_PHI:
423 return selectImplicitDefOrPHI(
I,
MRI);
424 case TargetOpcode::G_MUL:
425 case TargetOpcode::G_SMULH:
426 case TargetOpcode::G_UMULH:
427 case TargetOpcode::G_SDIV:
428 case TargetOpcode::G_UDIV:
429 case TargetOpcode::G_SREM:
430 case TargetOpcode::G_UREM:
431 return selectMulDivRem(
I,
MRI, MF);
432 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
433 return selectIntrinsicWSideEffects(
I,
MRI, MF);
439unsigned X86InstructionSelector::getLoadStoreOp(
const LLT &Ty,
442 Align Alignment)
const {
443 bool Isload = (Opc == TargetOpcode::G_LOAD);
444 bool HasAVX = STI.hasAVX();
445 bool HasAVX512 = STI.hasAVX512();
446 bool HasVLX = STI.hasVLX();
449 if (X86::GPRRegBankID == RB.
getID())
450 return Isload ? X86::MOV8rm : X86::MOV8mr;
452 if (X86::GPRRegBankID == RB.
getID())
453 return Isload ? X86::MOV16rm : X86::MOV16mr;
455 if (X86::GPRRegBankID == RB.
getID())
456 return Isload ? X86::MOV32rm : X86::MOV32mr;
457 if (X86::VECRRegBankID == RB.
getID())
458 return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
459 HasAVX ? X86::VMOVSSrm_alt :
461 : (HasAVX512 ? X86::VMOVSSZmr :
462 HasAVX ? X86::VMOVSSmr :
465 if (X86::GPRRegBankID == RB.
getID())
466 return Isload ? X86::MOV64rm : X86::MOV64mr;
467 if (X86::VECRRegBankID == RB.
getID())
468 return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
469 HasAVX ? X86::VMOVSDrm_alt :
471 : (HasAVX512 ? X86::VMOVSDZmr :
472 HasAVX ? X86::VMOVSDmr :
475 if (Alignment >=
Align(16))
476 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
478 ? X86::VMOVAPSZ128rm_NOVLX
479 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
480 : (HasVLX ? X86::VMOVAPSZ128mr
482 ? X86::VMOVAPSZ128mr_NOVLX
483 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
485 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
487 ? X86::VMOVUPSZ128rm_NOVLX
488 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
489 : (HasVLX ? X86::VMOVUPSZ128mr
491 ? X86::VMOVUPSZ128mr_NOVLX
492 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
494 if (Alignment >=
Align(32))
495 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
496 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
498 : (HasVLX ? X86::VMOVAPSZ256mr
499 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
502 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
503 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
505 : (HasVLX ? X86::VMOVUPSZ256mr
506 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
509 if (Alignment >=
Align(64))
510 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
512 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
521 assert(
I.getOperand(0).isReg() &&
"unsupported opperand.");
522 assert(
MRI.getType(
I.getOperand(0).getReg()).isPointer() &&
523 "unsupported type.");
525 if (
I.getOpcode() == TargetOpcode::G_PTR_ADD) {
528 if (isInt<32>(Imm)) {
529 AM.
Disp =
static_cast<int32_t
>(Imm);
530 AM.
Base.
Reg =
I.getOperand(1).getReg();
534 }
else if (
I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
541 AM.
Base.
Reg =
I.getOperand(0).getReg();
544bool X86InstructionSelector::selectLoadStoreOp(
MachineInstr &
I,
547 unsigned Opc =
I.getOpcode();
549 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
550 "unexpected instruction");
552 const Register DefReg =
I.getOperand(0).getReg();
553 LLT Ty =
MRI.getType(DefReg);
557 auto &
MemOp = **
I.memoperands_begin();
558 if (
MemOp.isAtomic()) {
564 if (!
MemOp.isUnordered()) {
574 unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc,
MemOp.getAlign());
581 I.setDesc(
TII.get(NewOpc));
583 if (Opc == TargetOpcode::G_LOAD) {
604bool X86InstructionSelector::selectFrameIndexOrGep(
MachineInstr &
I,
607 unsigned Opc =
I.getOpcode();
609 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
610 "unexpected instruction");
612 const Register DefReg =
I.getOperand(0).getReg();
613 LLT Ty =
MRI.getType(DefReg);
616 unsigned NewOpc =
getLeaOP(Ty, STI);
617 I.setDesc(
TII.get(NewOpc));
620 if (Opc == TargetOpcode::G_FRAME_INDEX) {
626 MIB.addImm(0).addReg(0);
632bool X86InstructionSelector::selectGlobalValue(
MachineInstr &
I,
635 assert((
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
636 "unexpected instruction");
638 auto GV =
I.getOperand(1).getGlobal();
639 if (GV->isThreadLocal()) {
649 AM.
GVOpFlags = STI.classifyGlobalReference(GV);
659 if (STI.isPICStyleRIPRel()) {
665 const Register DefReg =
I.getOperand(0).getReg();
666 LLT Ty =
MRI.getType(DefReg);
667 unsigned NewOpc =
getLeaOP(Ty, STI);
669 I.setDesc(
TII.get(NewOpc));
681 assert((
I.getOpcode() == TargetOpcode::G_CONSTANT) &&
682 "unexpected instruction");
684 const Register DefReg =
I.getOperand(0).getReg();
685 LLT Ty =
MRI.getType(DefReg);
687 if (RBI.getRegBank(DefReg,
MRI,
TRI)->getID() != X86::GPRRegBankID)
691 if (
I.getOperand(1).isCImm()) {
692 Val =
I.getOperand(1).getCImm()->getZExtValue();
693 I.getOperand(1).ChangeToImmediate(Val);
694 }
else if (
I.getOperand(1).isImm()) {
695 Val =
I.getOperand(1).getImm();
702 NewOpc = X86::MOV8ri;
705 NewOpc = X86::MOV16ri;
708 NewOpc = X86::MOV32ri;
713 NewOpc = X86::MOV64ri32;
715 NewOpc = X86::MOV64ri;
721 I.setDesc(
TII.get(NewOpc));
730 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
731 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
732 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
735bool X86InstructionSelector::selectTurnIntoCOPY(
740 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
741 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
746 I.setDesc(
TII.get(X86::COPY));
750bool X86InstructionSelector::selectTruncOrPtrToInt(
MachineInstr &
I,
753 assert((
I.getOpcode() == TargetOpcode::G_TRUNC ||
754 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
755 "unexpected instruction");
757 const Register DstReg =
I.getOperand(0).getReg();
758 const Register SrcReg =
I.getOperand(1).getReg();
760 const LLT DstTy =
MRI.getType(DstReg);
761 const LLT SrcTy =
MRI.getType(SrcReg);
768 <<
" input/output on different banks\n");
775 if (!DstRC || !SrcRC)
782 return selectTurnIntoCOPY(
I,
MRI, DstReg, DstRC, SrcReg, SrcRC);
784 if (DstRB.
getID() != X86::GPRRegBankID)
788 if (DstRC == SrcRC) {
790 SubIdx = X86::NoSubRegister;
791 }
else if (DstRC == &X86::GR32RegClass) {
792 SubIdx = X86::sub_32bit;
793 }
else if (DstRC == &X86::GR16RegClass) {
794 SubIdx = X86::sub_16bit;
795 }
else if (DstRC == &X86::GR8RegClass) {
796 SubIdx = X86::sub_8bit;
801 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
803 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
804 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
810 I.getOperand(1).setSubReg(SubIdx);
812 I.setDesc(
TII.get(X86::COPY));
819 assert((
I.getOpcode() == TargetOpcode::G_ZEXT) &&
"unexpected instruction");
821 const Register DstReg =
I.getOperand(0).getReg();
822 const Register SrcReg =
I.getOperand(1).getReg();
824 const LLT DstTy =
MRI.getType(DstReg);
825 const LLT SrcTy =
MRI.getType(SrcReg);
828 "8=>16 Zext is handled by tablegen");
830 "8=>32 Zext is handled by tablegen");
832 "16=>32 Zext is handled by tablegen");
834 "8=>64 Zext is handled by tablegen");
836 "16=>64 Zext is handled by tablegen");
838 "32=>64 Zext is handled by tablegen");
845 AndOpc = X86::AND8ri;
847 AndOpc = X86::AND16ri;
849 AndOpc = X86::AND32ri;
851 AndOpc = X86::AND64ri32;
860 TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);
864 TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
871 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(AndOpc), DstReg)
884 assert((
I.getOpcode() == TargetOpcode::G_ANYEXT) &&
"unexpected instruction");
886 const Register DstReg =
I.getOperand(0).getReg();
887 const Register SrcReg =
I.getOperand(1).getReg();
889 const LLT DstTy =
MRI.getType(DstReg);
890 const LLT SrcTy =
MRI.getType(SrcReg);
896 "G_ANYEXT input/output on different banks\n");
899 "G_ANYEXT incorrect operand size");
908 return selectTurnIntoCOPY(
I,
MRI, SrcReg, SrcRC, DstReg, DstRC);
910 if (DstRB.
getID() != X86::GPRRegBankID)
913 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
914 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
920 if (SrcRC == DstRC) {
921 I.setDesc(
TII.get(X86::COPY));
926 TII.get(TargetOpcode::SUBREG_TO_REG))
930 .
addImm(getSubRegIndex(SrcRC));
939 assert((
I.getOpcode() == TargetOpcode::G_ICMP) &&
"unexpected instruction");
953 LLT Ty =
MRI.getType(LHS);
962 OpCmp = X86::CMP16rr;
965 OpCmp = X86::CMP32rr;
968 OpCmp = X86::CMP64rr;
978 TII.get(X86::SETCCr),
I.getOperand(0).getReg()).
addImm(
CC);
990 assert((
I.getOpcode() == TargetOpcode::G_FCMP) &&
"unexpected instruction");
992 Register LhsReg =
I.getOperand(2).getReg();
993 Register RhsReg =
I.getOperand(3).getReg();
998 static const uint16_t SETFOpcTable[2][3] = {
1002 switch (Predicate) {
1006 SETFOpc = &SETFOpcTable[0][0];
1009 SETFOpc = &SETFOpcTable[1][0];
1015 LLT Ty =
MRI.getType(LhsReg);
1020 OpCmp = X86::UCOMISSrr;
1023 OpCmp = X86::UCOMISDrr;
1027 Register ResultReg =
I.getOperand(0).getReg();
1028 RBI.constrainGenericRegister(
1033 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpCmp))
1037 Register FlagReg1 =
MRI.createVirtualRegister(&X86::GR8RegClass);
1038 Register FlagReg2 =
MRI.createVirtualRegister(&X86::GR8RegClass);
1040 TII.get(X86::SETCCr), FlagReg1).
addImm(SETFOpc[0]);
1042 TII.get(X86::SETCCr), FlagReg2).
addImm(SETFOpc[1]);
1044 TII.get(SETFOpc[2]), ResultReg)
1052 I.eraseFromParent();
1066 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpCmp))
1074 I.eraseFromParent();
1081 assert((
I.getOpcode() == TargetOpcode::G_UADDE ||
1082 I.getOpcode() == TargetOpcode::G_UADDO ||
1083 I.getOpcode() == TargetOpcode::G_USUBE ||
1084 I.getOpcode() == TargetOpcode::G_USUBO) &&
1085 "unexpected instruction");
1087 const Register DstReg =
I.getOperand(0).getReg();
1088 const Register CarryOutReg =
I.getOperand(1).getReg();
1089 const Register Op0Reg =
I.getOperand(2).getReg();
1090 const Register Op1Reg =
I.getOperand(3).getReg();
1091 bool IsSub =
I.getOpcode() == TargetOpcode::G_USUBE ||
1092 I.getOpcode() == TargetOpcode::G_USUBO;
1093 bool HasCarryIn =
I.getOpcode() == TargetOpcode::G_UADDE ||
1094 I.getOpcode() == TargetOpcode::G_USUBE;
1096 const LLT DstTy =
MRI.getType(DstReg);
1097 assert(DstTy.
isScalar() &&
"selectUAddSub only supported for scalar types");
1100 unsigned OpADC, OpADD, OpSBB, OpSUB;
1103 OpADC = X86::ADC8rr;
1104 OpADD = X86::ADD8rr;
1105 OpSBB = X86::SBB8rr;
1106 OpSUB = X86::SUB8rr;
1109 OpADC = X86::ADC16rr;
1110 OpADD = X86::ADD16rr;
1111 OpSBB = X86::SBB16rr;
1112 OpSUB = X86::SUB16rr;
1115 OpADC = X86::ADC32rr;
1116 OpADD = X86::ADD32rr;
1117 OpSBB = X86::SBB32rr;
1118 OpSUB = X86::SUB32rr;
1121 OpADC = X86::ADC64rr;
1122 OpADD = X86::ADD64rr;
1123 OpSBB = X86::SBB64rr;
1124 OpSUB = X86::SUB64rr;
1133 unsigned Opcode = IsSub ? OpSUB : OpADD;
1137 Register CarryInReg =
I.getOperand(4).getReg();
1139 while (
Def->getOpcode() == TargetOpcode::G_TRUNC) {
1140 CarryInReg =
Def->getOperand(1).getReg();
1141 Def =
MRI.getVRegDef(CarryInReg);
1145 if (
Def->getOpcode() == TargetOpcode::G_UADDE ||
1146 Def->getOpcode() == TargetOpcode::G_UADDO ||
1147 Def->getOpcode() == TargetOpcode::G_USUBE ||
1148 Def->getOpcode() == TargetOpcode::G_USUBO) {
1150 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY),
1154 if (!RBI.constrainGenericRegister(CarryInReg, *DstRC,
MRI))
1157 Opcode = IsSub ? OpSBB : OpADC;
1163 Opcode = IsSub ? OpSUB : OpADD;
1169 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode), DstReg)
1173 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), CarryOutReg)
1177 !RBI.constrainGenericRegister(CarryOutReg, *DstRC,
MRI))
1180 I.eraseFromParent();
1187 assert((
I.getOpcode() == TargetOpcode::G_EXTRACT) &&
1188 "unexpected instruction");
1190 const Register DstReg =
I.getOperand(0).getReg();
1191 const Register SrcReg =
I.getOperand(1).getReg();
1192 int64_t
Index =
I.getOperand(2).getImm();
1194 const LLT DstTy =
MRI.getType(DstReg);
1195 const LLT SrcTy =
MRI.getType(SrcReg);
1206 if (!emitExtractSubreg(DstReg, SrcReg,
I,
MRI, MF))
1209 I.eraseFromParent();
1213 bool HasAVX = STI.hasAVX();
1214 bool HasAVX512 = STI.hasAVX512();
1215 bool HasVLX = STI.hasVLX();
1219 I.setDesc(
TII.get(X86::VEXTRACTF32x4Z256rr));
1221 I.setDesc(
TII.get(X86::VEXTRACTF128rr));
1226 I.setDesc(
TII.get(X86::VEXTRACTF32x4Zrr));
1228 I.setDesc(
TII.get(X86::VEXTRACTF64x4Zrr));
1236 I.getOperand(2).setImm(
Index);
1241bool X86InstructionSelector::emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
1245 const LLT DstTy =
MRI.getType(DstReg);
1246 const LLT SrcTy =
MRI.getType(SrcReg);
1247 unsigned SubIdx = X86::NoSubRegister;
1253 "Incorrect Src/Dst register size");
1256 SubIdx = X86::sub_xmm;
1258 SubIdx = X86::sub_ymm;
1265 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1267 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
1268 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1273 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), DstReg)
1274 .
addReg(SrcReg, 0, SubIdx);
1279bool X86InstructionSelector::emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
1283 const LLT DstTy =
MRI.getType(DstReg);
1284 const LLT SrcTy =
MRI.getType(SrcReg);
1285 unsigned SubIdx = X86::NoSubRegister;
1292 "Incorrect Src/Dst register size");
1295 SubIdx = X86::sub_xmm;
1297 SubIdx = X86::sub_ymm;
1304 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
1305 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1310 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY))
1320 assert((
I.getOpcode() == TargetOpcode::G_INSERT) &&
"unexpected instruction");
1322 const Register DstReg =
I.getOperand(0).getReg();
1323 const Register SrcReg =
I.getOperand(1).getReg();
1324 const Register InsertReg =
I.getOperand(2).getReg();
1325 int64_t
Index =
I.getOperand(3).getImm();
1327 const LLT DstTy =
MRI.getType(DstReg);
1328 const LLT InsertRegTy =
MRI.getType(InsertReg);
1337 if (
Index == 0 &&
MRI.getVRegDef(SrcReg)->isImplicitDef()) {
1339 if (!emitInsertSubreg(DstReg, InsertReg,
I,
MRI, MF))
1342 I.eraseFromParent();
1346 bool HasAVX = STI.hasAVX();
1347 bool HasAVX512 = STI.hasAVX512();
1348 bool HasVLX = STI.hasVLX();
1352 I.setDesc(
TII.get(X86::VINSERTF32x4Z256rr));
1354 I.setDesc(
TII.get(X86::VINSERTF128rr));
1359 I.setDesc(
TII.get(X86::VINSERTF32x4Zrr));
1361 I.setDesc(
TII.get(X86::VINSERTF64x4Zrr));
1370 I.getOperand(3).setImm(
Index);
1375bool X86InstructionSelector::selectUnmergeValues(
1377 assert((
I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1378 "unexpected instruction");
1381 unsigned NumDefs =
I.getNumOperands() - 1;
1382 Register SrcReg =
I.getOperand(NumDefs).getReg();
1383 unsigned DefSize =
MRI.getType(
I.getOperand(0).getReg()).getSizeInBits();
1385 for (
unsigned Idx = 0;
Idx < NumDefs; ++
Idx) {
1388 TII.get(TargetOpcode::G_EXTRACT),
I.getOperand(
Idx).getReg())
1392 if (!select(ExtrInst))
1396 I.eraseFromParent();
1400bool X86InstructionSelector::selectMergeValues(
1402 assert((
I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
1403 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
1404 "unexpected instruction");
1407 Register DstReg =
I.getOperand(0).getReg();
1408 Register SrcReg0 =
I.getOperand(1).getReg();
1410 const LLT DstTy =
MRI.getType(DstReg);
1411 const LLT SrcTy =
MRI.getType(SrcReg0);
1417 Register DefReg =
MRI.createGenericVirtualRegister(DstTy);
1418 MRI.setRegBank(DefReg, RegBank);
1419 if (!emitInsertSubreg(DefReg,
I.getOperand(1).getReg(),
I,
MRI, MF))
1422 for (
unsigned Idx = 2;
Idx <
I.getNumOperands(); ++
Idx) {
1423 Register Tmp =
MRI.createGenericVirtualRegister(DstTy);
1424 MRI.setRegBank(Tmp, RegBank);
1427 TII.get(TargetOpcode::G_INSERT), Tmp)
1434 if (!select(InsertInst))
1439 TII.get(TargetOpcode::COPY), DstReg)
1442 if (!select(CopyInst))
1445 I.eraseFromParent();
1449bool X86InstructionSelector::selectCondBranch(
MachineInstr &
I,
1452 assert((
I.getOpcode() == TargetOpcode::G_BRCOND) &&
"unexpected instruction");
1454 const Register CondReg =
I.getOperand(0).getReg();
1458 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::TEST8ri))
1461 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::JCC_1))
1466 I.eraseFromParent();
1473 assert((
I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1474 "unexpected instruction");
1481 const Register DstReg =
I.getOperand(0).getReg();
1482 const LLT DstTy =
MRI.getType(DstReg);
1485 const DebugLoc &DbgLoc =
I.getDebugLoc();
1488 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
1491 const ConstantFP *CFP =
I.getOperand(1).getFPImm();
1494 unsigned char OpFlag = STI.classifyLocalReference(
nullptr);
1500 Register AddrReg =
MRI.createVirtualRegister(&X86::GR64RegClass);
1501 BuildMI(*
I.getParent(),
I, DbgLoc,
TII.get(X86::MOV64ri), AddrReg)
1518 unsigned PICBase = 0;
1527 BuildMI(*
I.getParent(),
I, DbgLoc,
TII.get(Opc), DstReg), CPI, PICBase,
1533 I.eraseFromParent();
1537bool X86InstructionSelector::selectImplicitDefOrPHI(
1539 assert((
I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1540 I.getOpcode() == TargetOpcode::G_PHI) &&
1541 "unexpected instruction");
1543 Register DstReg =
I.getOperand(0).getReg();
1545 if (!
MRI.getRegClassOrNull(DstReg)) {
1546 const LLT DstTy =
MRI.getType(DstReg);
1549 if (!RBI.constrainGenericRegister(DstReg, *RC,
MRI)) {
1556 if (
I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1557 I.setDesc(
TII.get(X86::IMPLICIT_DEF));
1559 I.setDesc(
TII.get(X86::PHI));
1564bool X86InstructionSelector::selectMulDivRem(
MachineInstr &
I,
1568 assert((
I.getOpcode() == TargetOpcode::G_MUL ||
1569 I.getOpcode() == TargetOpcode::G_SMULH ||
1570 I.getOpcode() == TargetOpcode::G_UMULH ||
1571 I.getOpcode() == TargetOpcode::G_SDIV ||
1572 I.getOpcode() == TargetOpcode::G_SREM ||
1573 I.getOpcode() == TargetOpcode::G_UDIV ||
1574 I.getOpcode() == TargetOpcode::G_UREM) &&
1575 "unexpected instruction");
1577 const Register DstReg =
I.getOperand(0).getReg();
1578 const Register Op1Reg =
I.getOperand(1).getReg();
1579 const Register Op2Reg =
I.getOperand(2).getReg();
1581 const LLT RegTy =
MRI.getType(DstReg);
1582 assert(RegTy ==
MRI.getType(Op1Reg) && RegTy ==
MRI.getType(Op2Reg) &&
1583 "Arguments and return value types must match");
1586 if (!RegRB || RegRB->
getID() != X86::GPRRegBankID)
1589 const static unsigned NumTypes = 4;
1590 const static unsigned NumOps = 7;
1591 const static bool S =
true;
1592 const static bool U =
false;
1593 const static unsigned Copy = TargetOpcode::COPY;
1603 const static struct MulDivRemEntry {
1605 unsigned SizeInBits;
1609 struct MulDivRemResult {
1610 unsigned OpMulDivRem;
1611 unsigned OpSignExtend;
1617 } ResultTable[NumOps];
1618 } OpTable[NumTypes] = {
1623 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S},
1624 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S},
1625 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL,
U},
1626 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH,
U},
1627 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S},
1628 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S},
1629 {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH,
U},
1635 {X86::IDIV16r, X86::CWD,
Copy, X86::AX, S},
1636 {X86::IDIV16r, X86::CWD,
Copy, X86::DX, S},
1637 {X86::DIV16r, X86::MOV32r0,
Copy, X86::AX,
U},
1638 {X86::DIV16r, X86::MOV32r0,
Copy, X86::DX,
U},
1639 {X86::IMUL16r, X86::MOV32r0,
Copy, X86::AX, S},
1640 {X86::IMUL16r, X86::MOV32r0,
Copy, X86::DX, S},
1641 {X86::MUL16r, X86::MOV32r0,
Copy, X86::DX,
U},
1647 {X86::IDIV32r, X86::CDQ,
Copy, X86::EAX, S},
1648 {X86::IDIV32r, X86::CDQ,
Copy, X86::EDX, S},
1649 {X86::DIV32r, X86::MOV32r0,
Copy, X86::EAX,
U},
1650 {X86::DIV32r, X86::MOV32r0,
Copy, X86::EDX,
U},
1651 {X86::IMUL32r, X86::MOV32r0,
Copy, X86::EAX, S},
1652 {X86::IMUL32r, X86::MOV32r0,
Copy, X86::EDX, S},
1653 {X86::MUL32r, X86::MOV32r0,
Copy, X86::EDX,
U},
1659 {X86::IDIV64r, X86::CQO,
Copy, X86::RAX, S},
1660 {X86::IDIV64r, X86::CQO,
Copy, X86::RDX, S},
1661 {X86::DIV64r, X86::MOV32r0,
Copy, X86::RAX,
U},
1662 {X86::DIV64r, X86::MOV32r0,
Copy, X86::RDX,
U},
1663 {X86::IMUL64r, X86::MOV32r0,
Copy, X86::RAX, S},
1664 {X86::IMUL64r, X86::MOV32r0,
Copy, X86::RDX, S},
1665 {X86::MUL64r, X86::MOV32r0,
Copy, X86::RDX,
U},
1669 auto OpEntryIt =
llvm::find_if(OpTable, [RegTy](
const MulDivRemEntry &El) {
1672 if (OpEntryIt == std::end(OpTable))
1676 switch (
I.getOpcode()) {
1679 case TargetOpcode::G_SDIV:
1682 case TargetOpcode::G_SREM:
1685 case TargetOpcode::G_UDIV:
1688 case TargetOpcode::G_UREM:
1691 case TargetOpcode::G_MUL:
1694 case TargetOpcode::G_SMULH:
1697 case TargetOpcode::G_UMULH:
1702 const MulDivRemEntry &TypeEntry = *OpEntryIt;
1703 const MulDivRemEntry::MulDivRemResult &OpEntry =
1704 TypeEntry.ResultTable[
OpIndex];
1707 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC,
MRI) ||
1708 !RBI.constrainGenericRegister(Op2Reg, *RegRC,
MRI) ||
1709 !RBI.constrainGenericRegister(DstReg, *RegRC,
MRI)) {
1716 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpEntry.OpCopy),
1721 if (OpEntry.OpSignExtend) {
1722 if (OpEntry.IsOpSigned)
1724 TII.get(OpEntry.OpSignExtend));
1726 Register Zero32 =
MRI.createVirtualRegister(&X86::GR32RegClass);
1727 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::MOV32r0),
1735 TypeEntry.HighInReg)
1736 .
addReg(Zero32, 0, X86::sub_16bit);
1739 TypeEntry.HighInReg)
1743 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1752 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpEntry.OpMulDivRem))
1763 if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
1764 Register SourceSuperReg =
MRI.createVirtualRegister(&X86::GR16RegClass);
1765 Register ResultSuperReg =
MRI.createVirtualRegister(&X86::GR16RegClass);
1766 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Copy), SourceSuperReg)
1770 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::SHR16ri),
1777 TII.get(TargetOpcode::SUBREG_TO_REG))
1783 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1785 .
addReg(OpEntry.ResultReg);
1787 I.eraseFromParent();
1792bool X86InstructionSelector::selectIntrinsicWSideEffects(
1795 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
1796 "unexpected instruction");
1798 if (
I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
1801 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::TRAP));
1803 I.eraseFromParent();
1811 return new X86InstructionSelector(
TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI, const RegisterBankInfo &RBI)
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Atomic ordering constants.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
static const TargetRegisterClass * getRegClassFromGRPhysReg(Register Reg)
static void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI, X86AddressMode &AM)
This file declares the targeting of the RegisterBankInfo class for X86.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
ConstantFP - Floating Point Values [float, double].
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
virtual bool select(MachineInstr &I)=0
Select the (possibly generic) instruction I to only use target-specific opcodes.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
An instruction for reading from memory.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
MachineOperand class - Representation of each machine instruction operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A discriminated union of two or more pointer types, with the discriminator in the low bit of the pointer.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
This class provides the information for the target register banks.
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Reg
All possible values of the reg field in the ModR/M byte.
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of the symbol name from the base of the GOT.
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of the symbol minus the PIC base label.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should be swapped.
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for a global, not the global itself.
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg).
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class.
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &, X86RegisterBankInfo &)
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, unsigned GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value spilled to the constant pool.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction -- that is, a dereference of an address in a register, with no scale, index or displacement.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
X86AddressMode - This struct holds a generalized full x86 address mode.
union llvm::X86AddressMode::@609 Base
enum llvm::X86AddressMode::@608 BaseType