#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "X86-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;

  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;
#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }
  if (RB.getID() == X86::PSRRegBankID) {
    if (Ty.getSizeInBits() == 80)
      return &X86::RFP80RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::RFP64RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::RFP32RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}
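
// Probe the GPR classes from widest to narrowest to find the class that
// contains a given physical register.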
static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
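
// Constrain the virtual registers referenced by a debug instruction
// (e.g. DBG_VALUE) to concrete classes; a failure here only loses debug
// info, it never fails selection.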
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg() || !MO.getReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg.isPhysical())
      continue;
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
        dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
    if (!RC) {
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        LLVM_DEBUG(
            dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}
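
// Lower a generic COPY to a target COPY, fixing up width mismatches against
// physical GPRs with SUBREG_TO_REG (widening) or a subregister index
// (truncation), then constrain the destination register.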
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering, perform an anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to setup initial types, the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform a truncate.
    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg: it will get constrained when we hit another
  // of its uses or defs. Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
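
// Entry point of the selector: try the TableGen-erated patterns first via
// selectImpl(), then fall back to the hand-written cases below.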
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.
    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_SELECT:
    return selectSelect(I, MRI, MF);
  }

  return false;
}
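
// Choose the concrete load/store opcode for a type, register bank, and
// alignment: plain MOVs for GPRs, (V)MOVSS/SD and (V)MOVAPS/UPS variants for
// the vector bank (preferring EVEX/VEX encodings and aligned forms when
// legal), and x87 LD_Fp/ST_Fp for the pseudo-scalar bank.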
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
  } else if (Ty == LLT::scalar(64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
  } else if (Ty == LLT::scalar(80)) {
    return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                       : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVAPSrm
                                   : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                       : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVAPSmr
                                   : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                       : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVUPSrm
                                   : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                       : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVUPSmr
                                   : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                       : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                   : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                       : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                   : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                       : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                   : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                       : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                   : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc; // Keep the generic opcode; the caller treats this as no match.
}
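
// Fold a G_PTR_ADD with a constant offset that fits in 32 bits, or a
// G_FRAME_INDEX, into an X86AddressMode; everything else is used as a plain
// base register.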
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
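
// Select G_LOAD/G_STORE: bail out on ordered atomics, swap in the opcode
// picked by getLoadStoreOp, and rewrite the pointer operand into a full x86
// address, with a dedicated path for loads from the constant pool.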
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact the appropriate
    // MMO is already on the instruction we're mutating; as long as the
    // selected opcode can load or store the given size atomically, the rest
    // of the backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  const MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());

  if (Ptr->getOpcode() == TargetOpcode::G_CONSTANT_POOL) {
    assert(Opc == TargetOpcode::G_LOAD &&
           "Only G_LOAD from constant pool is expected");

    unsigned char OpFlag = STI.classifyLocalReference(nullptr);
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_GOTOFF)
      PICBase = TII.getGlobalBaseReg(&MF);
    else if (STI.is64Bit())
      PICBase = X86::RIP;

    I.removeOperand(1);
    addConstantPoolReference(MIB, Ptr->getOperand(1).getIndex(), PICBase,
                             OpFlag);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  X86AddressMode AM;
  X86SelectAddress(*Ptr, MRI, AM);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE is (value, address); the target store is (address, value).
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  I.addImplicitDefUseOperands(MF);
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
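
// Both G_FRAME_INDEX and G_PTR_ADD are selected to LEA.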
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX ||
          Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate the frame index and GEP.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal())
    return false; // TODO: we don't support TLS yet.

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // Stub references and PIC-base-relative references are not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags) ||
      isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
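
// Select G_CONSTANT into a MOVri of the right width; a 64-bit immediate that
// fits in a sign-extended 32-bit field uses the shorter MOV64ri32.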
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
// Helper: a value can be moved between a scalar FP class and a 128-bit
// vector class with a plain COPY.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
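
// G_TRUNC/G_PTRTOINT: on the GPR bank this becomes a COPY annotated with the
// destination's subregister index; a vector-to-scalar-FP truncation accepted
// by canTurnIntoCOPY becomes a plain COPY.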
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If the value lives on the vector bank and the destination is a scalar FP
  // class, the truncation is selectable as a regular copy.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
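
// Only s1 sources reach this point (all wider zero-extensions are handled by
// tablegen, as the asserts below document): widen the bit with
// IMPLICIT_DEF + INSERT_SUBREG if needed, then mask it with AND ..., 1.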
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
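
// G_ANYEXT: same-class operands degrade to a COPY; otherwise the source is
// placed into the wider register with SUBREG_TO_REG.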
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If the value lives on a scalar FP class and the destination is a vector
  // class, the any-extension is selectable as a regular copy.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
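
// Select G_ICMP as CMPrr followed by SETCCr with the condition code mapped
// from the IR predicate.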
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:  OpCmp = X86::CMP8rr;  break;
  case 16: OpCmp = X86::CMP16rr; break;
  case 32: OpCmp = X86::CMP32rr; break;
  case 64: OpCmp = X86::CMP64rr; break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
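
// Select G_FCMP via UCOMISS/UCOMISD. FCMP_OEQ and FCMP_UNE have no single
// condition code, so they combine two SETCCs with AND8rr/OR8rr as encoded in
// SETFOpcTable.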
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
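
// Select G_UADDO/G_UADDE/G_USUBO/G_USUBE as ADD/ADC/SUB/SBB, threading the
// carry in and out through copies of EFLAGS.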
bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE ||
          I.getOpcode() == TargetOpcode::G_UADDO ||
          I.getOpcode() == TargetOpcode::G_USUBE ||
          I.getOpcode() == TargetOpcode::G_USUBO) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE ||
               I.getOpcode() == TargetOpcode::G_USUBO;
  bool HasCarryIn = I.getOpcode() == TargetOpcode::G_UADDE ||
                    I.getOpcode() == TargetOpcode::G_USUBE;

  const LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

  // TODO: Handle immediate argument variants?
  unsigned OpADC, OpADD, OpSBB, OpSUB;
  switch (DstTy.getSizeInBits()) {
  case 8:
    OpADC = X86::ADC8rr;
    OpADD = X86::ADD8rr;
    OpSBB = X86::SBB8rr;
    OpSUB = X86::SUB8rr;
    break;
  case 16:
    OpADC = X86::ADC16rr;
    OpADD = X86::ADD16rr;
    OpSBB = X86::SBB16rr;
    OpSUB = X86::SUB16rr;
    break;
  case 32:
    OpADC = X86::ADC32rr;
    OpADD = X86::ADD32rr;
    OpSBB = X86::SBB32rr;
    OpSUB = X86::SUB32rr;
    break;
  case 64:
    OpADC = X86::ADC64rr;
    OpADD = X86::ADD64rr;
    OpSBB = X86::SBB64rr;
    OpSUB = X86::SUB64rr;
    break;
  default:
    llvm_unreachable("selectUAddSub unsupported type.");
  }

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

  unsigned Opcode = IsSub ? OpSUB : OpADD;

  // G_UADDE/G_USUBE - find the CarryIn def instruction.
  if (HasCarryIn) {
    Register CarryInReg = I.getOperand(4).getReg();
    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
      CarryInReg = Def->getOperand(1).getReg();
      Def = MRI.getVRegDef(CarryInReg);
    }

    // TODO: handle more CF-generating instructions.
    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
        Def->getOpcode() == TargetOpcode::G_UADDO ||
        Def->getOpcode() == TargetOpcode::G_USUBE ||
        Def->getOpcode() == TargetOpcode::G_USUBO) {
      // The carry was set by a previous ADD/SUB: copy it back into EFLAGS
      // and use the carry-consuming form of the operation.
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
              X86::EFLAGS)
          .addReg(CarryInReg);

      if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
        return false;

      Opcode = IsSub ? OpSBB : OpADC;
    } else if (auto Val = getIConstantVRegVal(CarryInReg, MRI)) {
      // The carry is constant; only a zero carry-in is supported.
      if (*Val != 0)
        return false;

      Opcode = IsSub ? OpSUB : OpADD;
    } else
      return false;
  }

  MachineInstr &Inst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(Inst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, *DstRC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
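
// Select G_EXTRACT of a subvector: index 0 is a subregister copy, any other
// index becomes a VEXTRACTF instruction with a scaled immediate.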
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // Meanwhile handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
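
// Emit a subregister-index COPY that extracts the low 128/256 bits of a
// wider vector register.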
bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}
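
// Emit a COPY to the sub_xmm/sub_ymm lane of a wider vector register,
// leaving the remaining lanes undefined.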
bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types.
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}
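
// Select G_INSERT of a subvector: inserting at index 0 over an IMPLICIT_DEF
// collapses to a subregister copy, anything else becomes VINSERTF.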
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // Meanwhile handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();
  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
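
// Lower G_UNMERGE_VALUES into one G_EXTRACT per result and immediately
// re-run select() on each of them.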
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split into extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}
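
// Lower G_MERGE_VALUES/G_CONCAT_VECTORS into a chain of G_INSERTs (the first
// source via a subregister insert), selecting each emitted instruction.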
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split into inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source use a subregister insert.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}
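
// Select G_BRCOND as TEST8ri of the condition's low bit followed by
// JCC_1 with COND_NE.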
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) &&
         "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
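
// Materialize a G_FCONSTANT as a load from the constant pool, going through
// a 64-bit address register under the large code model.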
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  const auto &DL = MF.getDataLayout();
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the 64-bit large code model the address does not fit in an
    // immediate field, so materialize it in a register first.
    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);
  } else {
    // The constant-pool address fits in the immediate field; x86-32 PIC
    // additionally needs the PIC base register.
    unsigned PICBase = 0;
    if (OpFlag)
      PICBase = TII.getGlobalBaseReg(&MF);

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  }

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}
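
// Select G_MUL/G_SMULH/G_UMULH/G_SDIV/G_UDIV/G_SREM/G_UREM with the
// fixed-register MUL/IMUL/DIV/IDIV sequences; the table below is adapted
// from X86FastISel.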
bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulH
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register
  // rather than a register pair, and we therefore directly sign-extend the
  // dividend into lowreg, instead of copying, and ignore the highreg.
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into
                             // highreg, or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying the dividend into lowreg,
                             // or zero/sign-extending into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use the signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},     // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},     // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},  // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},  // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }},                                                 // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},     // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},     // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},  // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},  // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }},                                                  // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},     // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},     // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},  // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},  // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }},                                                  // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into the low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into the high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For the i8 remainder we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the machine register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(ResultSuperReg, 0, X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.ResultReg);
  }
  I.eraseFromParent();
  return true;
}
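
// Select G_SELECT as TEST32rr on the condition plus a CMOV; subtargets
// without CMOV get the CMOV_GR* pseudos, which are expanded after selection.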
bool X86InstructionSelector::selectSelect(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  GSelect &Sel = cast<GSelect>(I);
  unsigned DstReg = Sel.getReg(0);
  BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
      .addReg(Sel.getCondReg())
      .addReg(Sel.getCondReg());

  unsigned OpCmp;
  LLT Ty = MRI.getType(DstReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:  OpCmp = X86::CMOV_GR8; break;
  case 16: OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16; break;
  case 32: OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32; break;
  case 64:
    assert(STI.is64Bit() && STI.canUseCMOV());
    OpCmp = X86::CMOV64rr;
    break;
  }
  BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
      .addReg(Sel.getTrueReg())
      .addReg(Sel.getFalseReg())
      .addImm(X86::COND_E);

  const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
    return false;
  }

  Sel.eraseFromParent();
  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   const X86Subtarget &Subtarget,
                                   const X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}