#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "X86-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;

  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;
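  // The X86GenGlobalISel.inc blocks below pull in the TableGen-generated
  // parts of the selector: pattern predicates, the selectImpl() matcher
  // tables, and the temporaries they use.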
#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }
  if (RB.getID() == X86::PSRRegBankID) {
    if (Ty.getSizeInBits() == 80)
      return &X86::RFP80RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::RFP64RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::RFP32RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg() || !MO.getReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg.isPhysical())
      continue;
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
        dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
    if (!RC) {
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        LLVM_DEBUG(
            dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}
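// Set the X86 opcode and constrain DstReg. Copies are the glue GlobalISel
// uses to stitch register banks and ABI registers together, so this also
// patches up size mismatches against physical GPRs: an implicit anyext via
// SUBREG_TO_REG when the destination is wider, or a truncating subregister
// access when the source is wider.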
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering, perform anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types, so the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform a truncate.
    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg; it will be constrained when we hit another
  // of its uses or its defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.
    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_SELECT:
    return selectSelect(I, MRI, MF);
  }
}
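// Map a (type, register bank, alignment) triple for G_LOAD/G_STORE onto a
// concrete move opcode, preferring EVEX (AVX-512), then VEX (AVX), then
// legacy SSE encodings as the subtarget allows. For example, a 32-bit GPR
// load selects to MOV32rm, while a 16-byte-aligned 128-bit vector load on an
// AVX subtarget selects to VMOVAPSrm; unaligned vector accesses fall back to
// the MOVUPS family.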
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty.getSizeInBits() == 8) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty.getSizeInBits() == 16) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty.getSizeInBits() == 32) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
  } else if (Ty.getSizeInBits() == 64) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
  } else if (Ty.getSizeInBits() == 80) {
    return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                       : HasAVX512
                           ? X86::VMOVAPSZ128rm_NOVLX
                       : HasAVX ? X86::VMOVAPSrm
                                : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                       : HasAVX512
                           ? X86::VMOVAPSZ128mr_NOVLX
                       : HasAVX ? X86::VMOVAPSmr
                                : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                       : HasAVX512
                           ? X86::VMOVUPSZ128rm_NOVLX
                       : HasAVX ? X86::VMOVUPSrm
                                : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                       : HasAVX512
                           ? X86::VMOVUPSZ128mr_NOVLX
                       : HasAVX ? X86::VMOVUPSmr
                                : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                       : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                   : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                       : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                   : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                       : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                   : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                       : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                   : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
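// Fill an X86AddressMode from the instruction defining a pointer: a
// G_PTR_ADD with a constant offset that fits in 32 bits becomes base+disp,
// a G_FRAME_INDEX becomes a frame-index base, and anything else is used as
// a plain base register.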
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Unordered atomics only need an opcode of the proper width; the MMO
    // already carries the atomicity, and the rest of the backend is required
    // to respect it.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  const MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());

  if (Ptr->getOpcode() == TargetOpcode::G_CONSTANT_POOL) {
    assert(Opc == TargetOpcode::G_LOAD &&
           "Only G_LOAD from constant pool is expected");

    unsigned char OpFlag = STI.classifyLocalReference(nullptr);
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_GOTOFF)
      PICBase = TII.getGlobalBaseReg(&MF);
    else if (STI.is64Bit())
      PICBase = X86::RIP;

    I.removeOperand(1);
    addConstantPoolReference(MIB, Ptr->getOperand(1).getIndex(), PICBase,
                             OpFlag);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  X86AddressMode AM;
  X86SelectAddress(*Ptr, MRI, AM);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  bool Constrained = constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  I.addImplicitDefUseOperands(MF);
  return Constrained;
}
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load. not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the pic base. not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
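// Whether a G_TRUNC/G_ANYEXT between these classes is a no-op: the scalar FP
// classes (FR32/FR64 and their AVX-512 "X" variants) live inside the 128-bit
// vector registers, so crossing between them is just a COPY.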
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If the value lives on the vector class and the truncation goes into the
  // floating class, just replace it with a copy; we can select that as a
  // regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
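// G_ANYEXT leaves the upper bits undefined, so on the GPR bank it can be
// selected as a plain COPY (same class) or a SUBREG_TO_REG that places the
// narrow source into the wider register.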
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If the value lives on the floating class and the extension goes into the
  // vector class, just replace it with a copy; we can select that as a
  // regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
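// FCMP_OEQ and FCMP_UNE cannot be expressed with a single SETcc after
// UCOMISS/UCOMISD: they need two flag reads (E/NP for OEQ, NE/P for UNE)
// combined with AND8rr or OR8rr. Every other predicate maps onto a single
// condition code, possibly after swapping the operands.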
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                               TII.get(X86::SETCCr), ResultReg)
                           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE ||
          I.getOpcode() == TargetOpcode::G_UADDO ||
          I.getOpcode() == TargetOpcode::G_USUBE ||
          I.getOpcode() == TargetOpcode::G_USUBO) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE ||
               I.getOpcode() == TargetOpcode::G_USUBO;
  bool HasCarryIn = I.getOpcode() == TargetOpcode::G_UADDE ||
                    I.getOpcode() == TargetOpcode::G_USUBE;

  const LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

  // TODO: Handle immediate argument variants?
  unsigned OpADC, OpADD, OpSBB, OpSUB;
  switch (DstTy.getSizeInBits()) {
  case 8:
    OpADC = X86::ADC8rr;
    OpADD = X86::ADD8rr;
    OpSBB = X86::SBB8rr;
    OpSUB = X86::SUB8rr;
    break;
  case 16:
    OpADC = X86::ADC16rr;
    OpADD = X86::ADD16rr;
    OpSBB = X86::SBB16rr;
    OpSUB = X86::SUB16rr;
    break;
  case 32:
    OpADC = X86::ADC32rr;
    OpADD = X86::ADD32rr;
    OpSBB = X86::SBB32rr;
    OpSUB = X86::SUB32rr;
    break;
  case 64:
    OpADC = X86::ADC64rr;
    OpADD = X86::ADD64rr;
    OpSBB = X86::SBB64rr;
    OpSUB = X86::SUB64rr;
    break;
  default:
    llvm_unreachable("selectUAddSub unsupported type.");
  }

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

  unsigned Opcode = IsSub ? OpSUB : OpADD;

  // G_UADDE/G_USUBE - find the CarryIn def instruction.
  if (HasCarryIn) {
    Register CarryInReg = I.getOperand(4).getReg();
    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
      CarryInReg = Def->getOperand(1).getReg();
      Def = MRI.getVRegDef(CarryInReg);
    }

    // TODO - handle more CF generating instructions.
    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
        Def->getOpcode() == TargetOpcode::G_UADDO ||
        Def->getOpcode() == TargetOpcode::G_USUBE ||
        Def->getOpcode() == TargetOpcode::G_USUBO) {
      // Carry set by a previous ADD/SUB.
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
              X86::EFLAGS)
          .addReg(CarryInReg);

      if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
        return false;

      Opcode = IsSub ? OpSBB : OpADC;
    } else if (auto Val = getIConstantVRegVal(CarryInReg, MRI)) {
      // Carry is constant; support only 0.
      if (*Val != 0)
        return false;

      Opcode = IsSub ? OpSUB : OpADD;
    } else
      return false;
  }

  MachineInstr &Inst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(Inst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, *DstRC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
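// G_EXTRACT of the low lane is a plain subregister copy; extracting an upper
// 128-bit or 256-bit lane needs a VEXTRACTF* instruction with its immediate
// rescaled from a bit offset to a lane index.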
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rri));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
    else
      return false;
  } else
    return false;

  // Convert to an X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}
bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types.
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rri));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
    else
      return false;
  } else
    return false;

  // Convert to an X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();
  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split into extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split into inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use an insert subreg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
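// Materialize an FP constant through the constant pool: under the x86-64
// large code model the pool address is first loaded with MOV64ri and then
// dereferenced, while the small code model (and 32-bit targets) fold the
// constant-pool reference directly into the load, using RIP or a PIC base
// register as needed.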
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  const auto &DL = MF.getDataLayout();
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code model, constant-pool addresses are
    // 64 bits wide and cannot be folded into immediate fields.
    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);
  } else {
    // The constant-pool reference fits into the immediate field. This holds
    // for X86-32 always and for X86-64 in small code model mode. x86-32 PIC
    // requires a PIC base register.
    unsigned PICBase = 0;
    if (OpFlag)
      PICBase = TII.getGlobalBaseReg(&MF);
    else if (STI.is64Bit())
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  }

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}
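// Multiplication, division and remainder all funnel through the one-operand
// MUL/IMUL/DIV/IDIV family: the first operand is copied into AL/AX/EAX/RAX,
// the high half of the dividend (or its sign extension) is set up explicitly,
// and the result is read back out of the fixed register that the table below
// names for each opcode and width.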
bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  // The implementation of this function is adapted from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulH
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend (numerator)
  // must be in a specific register pair highreg:lowreg, producing the
  // quotient in lowreg and the remainder in highreg. For most data types,
  // to set up the instruction, the dividend is copied into lowreg, and
  // lowreg is sign-extended into highreg. The exception is i8, where the
  // dividend is defined as a single register rather than a register pair,
  // so we directly sign-extend the dividend into lowreg instead of copying,
  // and ignore the highreg.
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into highreg,
                             // or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying the dividend into lowreg, or
                             // zero/sign-extending it into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use the signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},     // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},     // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},  // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},  // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }},                                                 // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},     // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},     // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},  // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},  // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }},                                                  // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},     // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},     // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},  // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},  // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }},                                                  // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into the low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into the high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For i8 remainder, we can't reference ah directly, as we'll end up with
  // bogus copies like %r9b = COPY %ah. Reference ax instead to avoid ah
  // references in a REX instruction.
  //
  // The current assumption of the fast register allocator is that isel won't
  // generate explicit references to the GR8_NOREX registers. If the allocator
  // and/or the backend get enhanced to be more robust in that regard, this
  // can be, and should be, removed.
  if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(ResultSuperReg, 0, X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.ResultReg);
  }
  I.eraseFromParent();
  return true;
}
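// G_SELECT is lowered as a TEST32rr of the condition against itself followed
// by a CMOV (or a CMOV_GR* pseudo when the subtarget lacks CMOV), picking
// the false operand when the condition is zero.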
bool X86InstructionSelector::selectSelect(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  GSelect &Sel = cast<GSelect>(I);
  unsigned DstReg = Sel.getReg(0);
  BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
      .addReg(Sel.getCondReg())
      .addReg(Sel.getCondReg());

  unsigned OpCmp;
  LLT Ty = MRI.getType(DstReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMOV_GR8;
    break;
  case 16:
    OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
    break;
  case 32:
    OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
    break;
  case 64:
    assert(STI.is64Bit() && STI.canUseCMOV());
    OpCmp = X86::CMOV64rr;
    break;
  }
  BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
      .addReg(Sel.getTrueReg())
      .addReg(Sel.getFalseReg())
      .addImm(X86::COND_E);

  const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
    return false;
  }

  Sel.eraseFromParent();
  return true;
}
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   const X86Subtarget &Subtarget,
                                   const X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}