52#define DEBUG_TYPE "x86-instr-info"
54#define GET_INSTRINFO_CTOR_DTOR
55#include "X86GenInstrInfo.inc"
61 cl::desc(
"Disable fusing of spill code into instructions"),
65 cl::desc(
"Print instructions that the allocator wants to"
66 " fuse, but the X86 backend currently can't"),
70 cl::desc(
"Re-materialize load from stub in PIC mode"),
74 cl::desc(
"Clearance between two register writes "
75 "for inserting XOR to avoid partial "
79 "undef-reg-clearance",
80 cl::desc(
"How many idle instructions we would like before "
81 "certain undef register reads"),
85void X86InstrInfo::anchor() {}
89 (STI.isTarget64BitLP64() ?
X86::ADJCALLSTACKDOWN64
90 :
X86::ADJCALLSTACKDOWN32),
91 (STI.isTarget64BitLP64() ?
X86::ADJCALLSTACKUP64
92 :
X86::ADJCALLSTACKUP32),
94 Subtarget(STI), RI(STI.getTargetTriple()) {}
97 unsigned OpNum)
const {
101 if (!RC || !Subtarget.hasEGPR())
113 unsigned &SubIdx)
const {
114 switch (
MI.getOpcode()) {
117 case X86::MOVSX16rr8:
118 case X86::MOVZX16rr8:
119 case X86::MOVSX32rr8:
120 case X86::MOVZX32rr8:
121 case X86::MOVSX64rr8:
122 if (!Subtarget.is64Bit())
127 case X86::MOVSX32rr16:
128 case X86::MOVZX32rr16:
129 case X86::MOVSX64rr16:
130 case X86::MOVSX64rr32: {
131 if (
MI.getOperand(0).getSubReg() ||
MI.getOperand(1).getSubReg())
134 SrcReg =
MI.getOperand(1).getReg();
135 DstReg =
MI.getOperand(0).getReg();
136 switch (
MI.getOpcode()) {
139 case X86::MOVSX16rr8:
140 case X86::MOVZX16rr8:
141 case X86::MOVSX32rr8:
142 case X86::MOVZX32rr8:
143 case X86::MOVSX64rr8:
144 SubIdx = X86::sub_8bit;
146 case X86::MOVSX32rr16:
147 case X86::MOVZX32rr16:
148 case X86::MOVSX64rr16:
149 SubIdx = X86::sub_16bit;
151 case X86::MOVSX64rr32:
152 SubIdx = X86::sub_32bit;
162 if (
MI.mayLoad() ||
MI.mayStore())
167 if (
MI.isCopyLike() ||
MI.isInsertSubreg())
170 unsigned Opcode =
MI.getOpcode();
181 if (isBSF(Opcode) || isBSR(Opcode) || isLZCNT(Opcode) || isPOPCNT(Opcode) ||
187 if (isBLCFILL(Opcode) || isBLCI(Opcode) || isBLCIC(Opcode) ||
188 isBLCMSK(Opcode) || isBLCS(Opcode) || isBLSFILL(Opcode) ||
189 isBLSI(Opcode) || isBLSIC(Opcode) || isBLSMSK(Opcode) || isBLSR(Opcode) ||
194 if (isBEXTR(Opcode) || isBZHI(Opcode))
197 if (isROL(Opcode) || isROR(Opcode) || isSAR(Opcode) || isSHL(Opcode) ||
198 isSHR(Opcode) || isSHLD(Opcode) || isSHRD(Opcode))
201 if (isADC(Opcode) || isADD(Opcode) || isAND(Opcode) || isOR(Opcode) ||
202 isSBB(Opcode) || isSUB(Opcode) || isXOR(Opcode))
208 if (isDEC(Opcode) || isINC(Opcode) || isNEG(Opcode))
216 if (isMOVSX(Opcode) || isMOVZX(Opcode) || isMOVSXD(Opcode) || isMOV(Opcode))
219 if (isRORX(Opcode) || isSARX(Opcode) || isSHLX(Opcode) || isSHRX(Opcode))
229 switch (
MI.getOpcode()) {
242 case X86::IMUL64rmi32:
257 case X86::POPCNT16rm:
258 case X86::POPCNT32rm:
259 case X86::POPCNT64rm:
267 case X86::BLCFILL32rm:
268 case X86::BLCFILL64rm:
273 case X86::BLCMSK32rm:
274 case X86::BLCMSK64rm:
277 case X86::BLSFILL32rm:
278 case X86::BLSFILL64rm:
283 case X86::BLSMSK32rm:
284 case X86::BLSMSK64rm:
294 case X86::BEXTRI32mi:
295 case X86::BEXTRI64mi:
348 case X86::CVTTSD2SI64rm:
349 case X86::VCVTTSD2SI64rm:
350 case X86::VCVTTSD2SI64Zrm:
351 case X86::CVTTSD2SIrm:
352 case X86::VCVTTSD2SIrm:
353 case X86::VCVTTSD2SIZrm:
354 case X86::CVTTSS2SI64rm:
355 case X86::VCVTTSS2SI64rm:
356 case X86::VCVTTSS2SI64Zrm:
357 case X86::CVTTSS2SIrm:
358 case X86::VCVTTSS2SIrm:
359 case X86::VCVTTSS2SIZrm:
360 case X86::CVTSI2SDrm:
361 case X86::VCVTSI2SDrm:
362 case X86::VCVTSI2SDZrm:
363 case X86::CVTSI2SSrm:
364 case X86::VCVTSI2SSrm:
365 case X86::VCVTSI2SSZrm:
366 case X86::CVTSI642SDrm:
367 case X86::VCVTSI642SDrm:
368 case X86::VCVTSI642SDZrm:
369 case X86::CVTSI642SSrm:
370 case X86::VCVTSI642SSrm:
371 case X86::VCVTSI642SSZrm:
372 case X86::CVTSS2SDrm:
373 case X86::VCVTSS2SDrm:
374 case X86::VCVTSS2SDZrm:
375 case X86::CVTSD2SSrm:
376 case X86::VCVTSD2SSrm:
377 case X86::VCVTSD2SSZrm:
379 case X86::VCVTTSD2USI64Zrm:
380 case X86::VCVTTSD2USIZrm:
381 case X86::VCVTTSS2USI64Zrm:
382 case X86::VCVTTSS2USIZrm:
383 case X86::VCVTUSI2SDZrm:
384 case X86::VCVTUSI642SDZrm:
385 case X86::VCVTUSI2SSZrm:
386 case X86::VCVTUSI642SSZrm:
390 case X86::MOV8rm_NOREX:
394 case X86::MOVSX16rm8:
395 case X86::MOVSX32rm16:
396 case X86::MOVSX32rm8:
397 case X86::MOVSX32rm8_NOREX:
398 case X86::MOVSX64rm16:
399 case X86::MOVSX64rm32:
400 case X86::MOVSX64rm8:
401 case X86::MOVZX16rm8:
402 case X86::MOVZX32rm16:
403 case X86::MOVZX32rm8:
404 case X86::MOVZX32rm8_NOREX:
405 case X86::MOVZX64rm16:
406 case X86::MOVZX64rm8:
415 if (isFrameInstr(
MI)) {
418 if (!isFrameSetup(
MI))
429 for (
auto E =
MBB->end();
I != E; ++
I) {
430 if (
I->getOpcode() == getCallFrameDestroyOpcode() ||
I->isCall())
436 if (
I->getOpcode() != getCallFrameDestroyOpcode())
439 return -(
I->getOperand(1).
getImm());
444 switch (
MI.getOpcode()) {
463 int &FrameIndex)
const {
483 case X86::KMOVBkm_EVEX:
488 case X86::KMOVWkm_EVEX:
490 case X86::VMOVSHZrm_alt:
495 case X86::MOVSSrm_alt:
497 case X86::VMOVSSrm_alt:
499 case X86::VMOVSSZrm_alt:
501 case X86::KMOVDkm_EVEX:
507 case X86::MOVSDrm_alt:
509 case X86::VMOVSDrm_alt:
511 case X86::VMOVSDZrm_alt:
512 case X86::MMX_MOVD64rm:
513 case X86::MMX_MOVQ64rm:
515 case X86::KMOVQkm_EVEX:
530 case X86::VMOVAPSZ128rm:
531 case X86::VMOVUPSZ128rm:
532 case X86::VMOVAPSZ128rm_NOVLX:
533 case X86::VMOVUPSZ128rm_NOVLX:
534 case X86::VMOVAPDZ128rm:
535 case X86::VMOVUPDZ128rm:
536 case X86::VMOVDQU8Z128rm:
537 case X86::VMOVDQU16Z128rm:
538 case X86::VMOVDQA32Z128rm:
539 case X86::VMOVDQU32Z128rm:
540 case X86::VMOVDQA64Z128rm:
541 case X86::VMOVDQU64Z128rm:
544 case X86::VMOVAPSYrm:
545 case X86::VMOVUPSYrm:
546 case X86::VMOVAPDYrm:
547 case X86::VMOVUPDYrm:
548 case X86::VMOVDQAYrm:
549 case X86::VMOVDQUYrm:
550 case X86::VMOVAPSZ256rm:
551 case X86::VMOVUPSZ256rm:
552 case X86::VMOVAPSZ256rm_NOVLX:
553 case X86::VMOVUPSZ256rm_NOVLX:
554 case X86::VMOVAPDZ256rm:
555 case X86::VMOVUPDZ256rm:
556 case X86::VMOVDQU8Z256rm:
557 case X86::VMOVDQU16Z256rm:
558 case X86::VMOVDQA32Z256rm:
559 case X86::VMOVDQU32Z256rm:
560 case X86::VMOVDQA64Z256rm:
561 case X86::VMOVDQU64Z256rm:
564 case X86::VMOVAPSZrm:
565 case X86::VMOVUPSZrm:
566 case X86::VMOVAPDZrm:
567 case X86::VMOVUPDZrm:
568 case X86::VMOVDQU8Zrm:
569 case X86::VMOVDQU16Zrm:
570 case X86::VMOVDQA32Zrm:
571 case X86::VMOVDQU32Zrm:
572 case X86::VMOVDQA64Zrm:
573 case X86::VMOVDQU64Zrm:
585 case X86::KMOVBmk_EVEX:
590 case X86::KMOVWmk_EVEX:
599 case X86::KMOVDmk_EVEX:
607 case X86::MMX_MOVD64mr:
608 case X86::MMX_MOVQ64mr:
609 case X86::MMX_MOVNTQmr:
611 case X86::KMOVQmk_EVEX:
626 case X86::VMOVUPSZ128mr:
627 case X86::VMOVAPSZ128mr:
628 case X86::VMOVUPSZ128mr_NOVLX:
629 case X86::VMOVAPSZ128mr_NOVLX:
630 case X86::VMOVUPDZ128mr:
631 case X86::VMOVAPDZ128mr:
632 case X86::VMOVDQA32Z128mr:
633 case X86::VMOVDQU32Z128mr:
634 case X86::VMOVDQA64Z128mr:
635 case X86::VMOVDQU64Z128mr:
636 case X86::VMOVDQU8Z128mr:
637 case X86::VMOVDQU16Z128mr:
640 case X86::VMOVUPSYmr:
641 case X86::VMOVAPSYmr:
642 case X86::VMOVUPDYmr:
643 case X86::VMOVAPDYmr:
644 case X86::VMOVDQUYmr:
645 case X86::VMOVDQAYmr:
646 case X86::VMOVUPSZ256mr:
647 case X86::VMOVAPSZ256mr:
648 case X86::VMOVUPSZ256mr_NOVLX:
649 case X86::VMOVAPSZ256mr_NOVLX:
650 case X86::VMOVUPDZ256mr:
651 case X86::VMOVAPDZ256mr:
652 case X86::VMOVDQU8Z256mr:
653 case X86::VMOVDQU16Z256mr:
654 case X86::VMOVDQA32Z256mr:
655 case X86::VMOVDQU32Z256mr:
656 case X86::VMOVDQA64Z256mr:
657 case X86::VMOVDQU64Z256mr:
660 case X86::VMOVUPSZmr:
661 case X86::VMOVAPSZmr:
662 case X86::VMOVUPDZmr:
663 case X86::VMOVAPDZmr:
664 case X86::VMOVDQU8Zmr:
665 case X86::VMOVDQU16Zmr:
666 case X86::VMOVDQA32Zmr:
667 case X86::VMOVDQU32Zmr:
668 case X86::VMOVDQA64Zmr:
669 case X86::VMOVDQU64Zmr:
677 int &FrameIndex)
const {
686 if (
MI.getOperand(0).getSubReg() == 0 && isFrameOperand(
MI, 1, FrameIndex))
687 return MI.getOperand(0).getReg();
692 int &FrameIndex)
const {
703 return MI.getOperand(0).getReg();
710 int &FrameIndex)
const {
720 isFrameOperand(
MI, 0, FrameIndex))
726 int &FrameIndex)
const {
746 if (!BaseReg.isVirtual())
748 bool isPICBase =
false;
750 if (
DefMI.getOpcode() != X86::MOVPC32r)
752 assert(!isPICBase &&
"More than one PIC base?");
760 switch (
MI.getOpcode()) {
766 case X86::IMPLICIT_DEF:
769 case X86::LOAD_STACK_GUARD:
776 case X86::AVX1_SETALLONES:
777 case X86::AVX2_SETALLONES:
778 case X86::AVX512_128_SET0:
779 case X86::AVX512_256_SET0:
780 case X86::AVX512_512_SET0:
781 case X86::AVX512_128_SETALLONES:
782 case X86::AVX512_256_SETALLONES:
783 case X86::AVX512_512_SETALLONES:
784 case X86::AVX512_FsFLD0SD:
785 case X86::AVX512_FsFLD0SH:
786 case X86::AVX512_FsFLD0SS:
787 case X86::AVX512_FsFLD0F128:
792 case X86::FsFLD0F128:
802 case X86::MOV32ImmSExti8:
807 case X86::MOV64ImmSExti8:
809 case X86::V_SETALLONES:
815 case X86::PTILEZEROV:
819 case X86::MOV8rm_NOREX:
824 case X86::MOVSSrm_alt:
826 case X86::MOVSDrm_alt:
834 case X86::VMOVSSrm_alt:
836 case X86::VMOVSDrm_alt:
843 case X86::VMOVAPSYrm:
844 case X86::VMOVUPSYrm:
845 case X86::VMOVAPDYrm:
846 case X86::VMOVUPDYrm:
847 case X86::VMOVDQAYrm:
848 case X86::VMOVDQUYrm:
849 case X86::MMX_MOVD64rm:
850 case X86::MMX_MOVQ64rm:
851 case X86::VBROADCASTSSrm:
852 case X86::VBROADCASTSSYrm:
853 case X86::VBROADCASTSDYrm:
855 case X86::VPBROADCASTBZ128rm:
856 case X86::VPBROADCASTBZ256rm:
857 case X86::VPBROADCASTBZrm:
858 case X86::VBROADCASTF32X2Z256rm:
859 case X86::VBROADCASTF32X2Zrm:
860 case X86::VBROADCASTI32X2Z128rm:
861 case X86::VBROADCASTI32X2Z256rm:
862 case X86::VBROADCASTI32X2Zrm:
863 case X86::VPBROADCASTWZ128rm:
864 case X86::VPBROADCASTWZ256rm:
865 case X86::VPBROADCASTWZrm:
866 case X86::VPBROADCASTDZ128rm:
867 case X86::VPBROADCASTDZ256rm:
868 case X86::VPBROADCASTDZrm:
869 case X86::VBROADCASTSSZ128rm:
870 case X86::VBROADCASTSSZ256rm:
871 case X86::VBROADCASTSSZrm:
872 case X86::VPBROADCASTQZ128rm:
873 case X86::VPBROADCASTQZ256rm:
874 case X86::VPBROADCASTQZrm:
875 case X86::VBROADCASTSDZ256rm:
876 case X86::VBROADCASTSDZrm:
878 case X86::VMOVSSZrm_alt:
880 case X86::VMOVSDZrm_alt:
882 case X86::VMOVSHZrm_alt:
883 case X86::VMOVAPDZ128rm:
884 case X86::VMOVAPDZ256rm:
885 case X86::VMOVAPDZrm:
886 case X86::VMOVAPSZ128rm:
887 case X86::VMOVAPSZ256rm:
888 case X86::VMOVAPSZ128rm_NOVLX:
889 case X86::VMOVAPSZ256rm_NOVLX:
890 case X86::VMOVAPSZrm:
891 case X86::VMOVDQA32Z128rm:
892 case X86::VMOVDQA32Z256rm:
893 case X86::VMOVDQA32Zrm:
894 case X86::VMOVDQA64Z128rm:
895 case X86::VMOVDQA64Z256rm:
896 case X86::VMOVDQA64Zrm:
897 case X86::VMOVDQU16Z128rm:
898 case X86::VMOVDQU16Z256rm:
899 case X86::VMOVDQU16Zrm:
900 case X86::VMOVDQU32Z128rm:
901 case X86::VMOVDQU32Z256rm:
902 case X86::VMOVDQU32Zrm:
903 case X86::VMOVDQU64Z128rm:
904 case X86::VMOVDQU64Z256rm:
905 case X86::VMOVDQU64Zrm:
906 case X86::VMOVDQU8Z128rm:
907 case X86::VMOVDQU8Z256rm:
908 case X86::VMOVDQU8Zrm:
909 case X86::VMOVUPDZ128rm:
910 case X86::VMOVUPDZ256rm:
911 case X86::VMOVUPDZrm:
912 case X86::VMOVUPSZ128rm:
913 case X86::VMOVUPSZ256rm:
914 case X86::VMOVUPSZ128rm_NOVLX:
915 case X86::VMOVUPSZ256rm_NOVLX:
916 case X86::VMOVUPSZrm: {
922 MI.isDereferenceableInvariantLoad()) {
924 if (BaseReg == 0 || BaseReg == X86::RIP)
966 if (ClobbersEFLAGS &&
MBB.computeRegisterLiveness(&
TRI, X86::EFLAGS,
I) !=
1001 if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS &&
1011 unsigned ShiftAmtOperandIdx) {
1013 unsigned ShiftCountMask = (
MI.getDesc().TSFlags &
X86II::REX_W) ? 63 : 31;
1014 unsigned Imm =
MI.getOperand(ShiftAmtOperandIdx).getImm();
1015 return Imm & ShiftCountMask;
1026 return ShAmt < 4 && ShAmt > 0;
1033 bool &NoSignFlag,
bool &ClearsOverflowFlag) {
1034 if (!(CmpValDefInstr.
getOpcode() == X86::SUBREG_TO_REG &&
1035 CmpInstr.
getOpcode() == X86::TEST64rr) &&
1036 !(CmpValDefInstr.
getOpcode() == X86::COPY &&
1044 "CmpInstr is an analyzable TEST16rr/TEST64rr, and "
1045 "`X86InstrInfo::analyzeCompare` requires two reg operands are the"
1054 "Caller guarantees that TEST64rr is a user of SUBREG_TO_REG or TEST16rr "
1055 "is a user of COPY sub16bit.");
1057 if (CmpInstr.
getOpcode() == X86::TEST16rr) {
1066 if (!((VregDefInstr->
getOpcode() == X86::AND32ri ||
1067 VregDefInstr->
getOpcode() == X86::AND64ri32) &&
1072 if (CmpInstr.
getOpcode() == X86::TEST64rr) {
1081 assert(VregDefInstr &&
"Must have a definition (SSA)");
1091 if (X86::isAND(VregDefInstr->
getOpcode()) &&
1112 if (Instr.modifiesRegister(X86::EFLAGS,
TRI))
1116 *AndInstr = VregDefInstr;
1137 ClearsOverflowFlag =
true;
1145 unsigned &NewSrcSubReg,
bool &isKill,
1151 RC =
Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
1153 RC =
Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
1156 unsigned SubReg = Src.getSubReg();
1157 isKill =
MI.killsRegister(SrcReg,
nullptr);
1159 NewSrcSubReg = X86::NoSubRegister;
1163 if (
Opc != X86::LEA64_32r) {
1166 assert(!Src.isUndef() &&
"Undef op doesn't need optimization");
1183 assert(!Src.isUndef() &&
"Undef op doesn't need optimization");
1188 NewSrcSubReg = X86::NoSubRegister;
1214MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
unsigned MIOpc,
1218 bool Is8BitOp)
const {
1223 RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
1224 *RegInfo.getRegClass(
MI.getOperand(0).getReg())) == 16) &&
1225 "Unexpected type for LEA transform");
1234 if (!Subtarget.is64Bit())
1237 unsigned Opcode = X86::LEA64_32r;
1238 Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1239 Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1252 unsigned SrcSubReg =
MI.getOperand(1).getSubReg();
1254 unsigned Src2SubReg;
1255 bool IsDead =
MI.getOperand(0).isDead();
1256 bool IsKill =
MI.getOperand(1).isKill();
1257 unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
1258 assert(!
MI.getOperand(1).isUndef() &&
"Undef op doesn't need optimization");
1270#define CASE_NF(OP) \
1278 unsigned ShAmt =
MI.getOperand(2).getImm();
1296 case X86::ADD8ri_DB:
1297 case X86::ADD16ri_DB:
1302 case X86::ADD8rr_DB:
1303 case X86::ADD16rr_DB: {
1304 Src2 =
MI.getOperand(2).getReg();
1305 Src2SubReg =
MI.getOperand(2).getSubReg();
1306 bool IsKill2 =
MI.getOperand(2).isKill();
1307 assert(!
MI.getOperand(2).isUndef() &&
"Undef op doesn't need optimization");
1311 addRegReg(MIB, InRegLEA,
true, X86::NoSubRegister, InRegLEA,
false,
1312 X86::NoSubRegister);
1314 if (Subtarget.is64Bit())
1320 ImpDef2 =
BuildMI(
MBB, &*MIB,
MI.getDebugLoc(),
get(X86::IMPLICIT_DEF),
1322 InsMI2 =
BuildMI(
MBB, &*MIB,
MI.getDebugLoc(),
get(TargetOpcode::COPY))
1325 addRegReg(MIB, InRegLEA,
true, X86::NoSubRegister, InRegLEA2,
true,
1326 X86::NoSubRegister);
1328 if (LV && IsKill2 && InsMI2)
1334 MachineInstr *NewMI = MIB;
1335 MachineInstr *ExtMI =
1383 LiveRange::Segment *DestSeg =
1424 if (
MI.getNumOperands() > 2)
1425 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).isUndef())
1430 unsigned SrcSubReg, SrcSubReg2;
1431 bool Is64Bit = Subtarget.is64Bit();
1433 bool Is8BitOp =
false;
1434 unsigned NumRegOperands = 2;
1435 unsigned MIOpc =
MI.getOpcode();
1440 assert(
MI.getNumOperands() >= 3 &&
"Unknown shift instruction!");
1447 Src.getReg(), &X86::GR64_NOSPRegClass))
1450 NewMI =
BuildMI(MF,
MI.getDebugLoc(),
get(X86::LEA64r))
1460 assert(
MI.getNumOperands() >= 3 &&
"Unknown shift instruction!");
1465 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1471 isKill, ImplicitOp, LV, LIS))
1482 if (ImplicitOp.
getReg() != 0)
1483 MIB.
add(ImplicitOp);
1487 if (LV && SrcReg != Src.getReg())
1495 assert(
MI.getNumOperands() >= 3 &&
"Unknown shift instruction!");
1499 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1503 assert(
MI.getNumOperands() >= 2 &&
"Unknown inc instruction!");
1504 unsigned Opc = (MIOpc == X86::INC64r || MIOpc == X86::INC64r_NF)
1506 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1510 isKill, ImplicitOp, LV, LIS))
1516 if (ImplicitOp.
getReg() != 0)
1517 MIB.
add(ImplicitOp);
1522 if (LV && SrcReg != Src.getReg())
1528 assert(
MI.getNumOperands() >= 2 &&
"Unknown dec instruction!");
1529 unsigned Opc = (MIOpc == X86::DEC64r || MIOpc == X86::DEC64r_NF)
1531 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1536 isKill, ImplicitOp, LV, LIS))
1542 if (ImplicitOp.
getReg() != 0)
1543 MIB.
add(ImplicitOp);
1548 if (LV && SrcReg != Src.getReg())
1558 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1561 case X86::ADD64rr_DB:
1562 case X86::ADD32rr_DB: {
1563 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1565 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_NF ||
1566 MIOpc == X86::ADD64rr_DB)
1569 Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1575 isKill2, ImplicitOp2, LV, LIS))
1580 if (Src.getReg() == Src2.
getReg()) {
1585 SrcSubReg = SrcSubReg2;
1588 isKill, ImplicitOp, LV, LIS))
1593 if (ImplicitOp.
getReg() != 0)
1594 MIB.
add(ImplicitOp);
1595 if (ImplicitOp2.
getReg() != 0)
1596 MIB.
add(ImplicitOp2);
1599 addRegReg(MIB, SrcReg, isKill, SrcSubReg, SrcReg2, isKill2, SrcSubReg2);
1603 if (SrcReg2 != Src2.
getReg())
1605 if (SrcReg != SrcReg2 && SrcReg != Src.getReg())
1612 case X86::ADD8rr_DB:
1616 case X86::ADD16rr_DB:
1617 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1619 case X86::ADD64ri32_DB:
1620 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1622 BuildMI(MF,
MI.getDebugLoc(),
get(X86::LEA64r)).add(Dest).add(Src),
1626 case X86::ADD32ri_DB: {
1627 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1628 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1633 isKill, ImplicitOp, LV, LIS))
1640 if (ImplicitOp.
getReg() != 0)
1641 MIB.
add(ImplicitOp);
1646 if (LV && SrcReg != Src.getReg())
1651 case X86::ADD8ri_DB:
1655 case X86::ADD16ri_DB:
1656 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1662 if (!
MI.getOperand(2).isImm())
1664 int64_t Imm =
MI.getOperand(2).getImm();
1668 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1669 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1674 isKill, ImplicitOp, LV, LIS))
1681 if (ImplicitOp.
getReg() != 0)
1682 MIB.
add(ImplicitOp);
1687 if (LV && SrcReg != Src.getReg())
1693 if (!
MI.getOperand(2).isImm())
1695 int64_t Imm =
MI.getOperand(2).getImm();
1699 assert(
MI.getNumOperands() >= 3 &&
"Unknown sub instruction!");
1707 case X86::VMOVDQU8Z128rmk:
1708 case X86::VMOVDQU8Z256rmk:
1709 case X86::VMOVDQU8Zrmk:
1710 case X86::VMOVDQU16Z128rmk:
1711 case X86::VMOVDQU16Z256rmk:
1712 case X86::VMOVDQU16Zrmk:
1713 case X86::VMOVDQU32Z128rmk:
1714 case X86::VMOVDQA32Z128rmk:
1715 case X86::VMOVDQU32Z256rmk:
1716 case X86::VMOVDQA32Z256rmk:
1717 case X86::VMOVDQU32Zrmk:
1718 case X86::VMOVDQA32Zrmk:
1719 case X86::VMOVDQU64Z128rmk:
1720 case X86::VMOVDQA64Z128rmk:
1721 case X86::VMOVDQU64Z256rmk:
1722 case X86::VMOVDQA64Z256rmk:
1723 case X86::VMOVDQU64Zrmk:
1724 case X86::VMOVDQA64Zrmk:
1725 case X86::VMOVUPDZ128rmk:
1726 case X86::VMOVAPDZ128rmk:
1727 case X86::VMOVUPDZ256rmk:
1728 case X86::VMOVAPDZ256rmk:
1729 case X86::VMOVUPDZrmk:
1730 case X86::VMOVAPDZrmk:
1731 case X86::VMOVUPSZ128rmk:
1732 case X86::VMOVAPSZ128rmk:
1733 case X86::VMOVUPSZ256rmk:
1734 case X86::VMOVAPSZ256rmk:
1735 case X86::VMOVUPSZrmk:
1736 case X86::VMOVAPSZrmk:
1737 case X86::VBROADCASTSDZ256rmk:
1738 case X86::VBROADCASTSDZrmk:
1739 case X86::VBROADCASTSSZ128rmk:
1740 case X86::VBROADCASTSSZ256rmk:
1741 case X86::VBROADCASTSSZrmk:
1742 case X86::VPBROADCASTDZ128rmk:
1743 case X86::VPBROADCASTDZ256rmk:
1744 case X86::VPBROADCASTDZrmk:
1745 case X86::VPBROADCASTQZ128rmk:
1746 case X86::VPBROADCASTQZ256rmk:
1747 case X86::VPBROADCASTQZrmk: {
1752 case X86::VMOVDQU8Z128rmk:
1753 Opc = X86::VPBLENDMBZ128rmk;
1755 case X86::VMOVDQU8Z256rmk:
1756 Opc = X86::VPBLENDMBZ256rmk;
1758 case X86::VMOVDQU8Zrmk:
1759 Opc = X86::VPBLENDMBZrmk;
1761 case X86::VMOVDQU16Z128rmk:
1762 Opc = X86::VPBLENDMWZ128rmk;
1764 case X86::VMOVDQU16Z256rmk:
1765 Opc = X86::VPBLENDMWZ256rmk;
1767 case X86::VMOVDQU16Zrmk:
1768 Opc = X86::VPBLENDMWZrmk;
1770 case X86::VMOVDQU32Z128rmk:
1771 Opc = X86::VPBLENDMDZ128rmk;
1773 case X86::VMOVDQU32Z256rmk:
1774 Opc = X86::VPBLENDMDZ256rmk;
1776 case X86::VMOVDQU32Zrmk:
1777 Opc = X86::VPBLENDMDZrmk;
1779 case X86::VMOVDQU64Z128rmk:
1780 Opc = X86::VPBLENDMQZ128rmk;
1782 case X86::VMOVDQU64Z256rmk:
1783 Opc = X86::VPBLENDMQZ256rmk;
1785 case X86::VMOVDQU64Zrmk:
1786 Opc = X86::VPBLENDMQZrmk;
1788 case X86::VMOVUPDZ128rmk:
1789 Opc = X86::VBLENDMPDZ128rmk;
1791 case X86::VMOVUPDZ256rmk:
1792 Opc = X86::VBLENDMPDZ256rmk;
1794 case X86::VMOVUPDZrmk:
1795 Opc = X86::VBLENDMPDZrmk;
1797 case X86::VMOVUPSZ128rmk:
1798 Opc = X86::VBLENDMPSZ128rmk;
1800 case X86::VMOVUPSZ256rmk:
1801 Opc = X86::VBLENDMPSZ256rmk;
1803 case X86::VMOVUPSZrmk:
1804 Opc = X86::VBLENDMPSZrmk;
1806 case X86::VMOVDQA32Z128rmk:
1807 Opc = X86::VPBLENDMDZ128rmk;
1809 case X86::VMOVDQA32Z256rmk:
1810 Opc = X86::VPBLENDMDZ256rmk;
1812 case X86::VMOVDQA32Zrmk:
1813 Opc = X86::VPBLENDMDZrmk;
1815 case X86::VMOVDQA64Z128rmk:
1816 Opc = X86::VPBLENDMQZ128rmk;
1818 case X86::VMOVDQA64Z256rmk:
1819 Opc = X86::VPBLENDMQZ256rmk;
1821 case X86::VMOVDQA64Zrmk:
1822 Opc = X86::VPBLENDMQZrmk;
1824 case X86::VMOVAPDZ128rmk:
1825 Opc = X86::VBLENDMPDZ128rmk;
1827 case X86::VMOVAPDZ256rmk:
1828 Opc = X86::VBLENDMPDZ256rmk;
1830 case X86::VMOVAPDZrmk:
1831 Opc = X86::VBLENDMPDZrmk;
1833 case X86::VMOVAPSZ128rmk:
1834 Opc = X86::VBLENDMPSZ128rmk;
1836 case X86::VMOVAPSZ256rmk:
1837 Opc = X86::VBLENDMPSZ256rmk;
1839 case X86::VMOVAPSZrmk:
1840 Opc = X86::VBLENDMPSZrmk;
1842 case X86::VBROADCASTSDZ256rmk:
1843 Opc = X86::VBLENDMPDZ256rmbk;
1845 case X86::VBROADCASTSDZrmk:
1846 Opc = X86::VBLENDMPDZrmbk;
1848 case X86::VBROADCASTSSZ128rmk:
1849 Opc = X86::VBLENDMPSZ128rmbk;
1851 case X86::VBROADCASTSSZ256rmk:
1852 Opc = X86::VBLENDMPSZ256rmbk;
1854 case X86::VBROADCASTSSZrmk:
1855 Opc = X86::VBLENDMPSZrmbk;
1857 case X86::VPBROADCASTDZ128rmk:
1858 Opc = X86::VPBLENDMDZ128rmbk;
1860 case X86::VPBROADCASTDZ256rmk:
1861 Opc = X86::VPBLENDMDZ256rmbk;
1863 case X86::VPBROADCASTDZrmk:
1864 Opc = X86::VPBLENDMDZrmbk;
1866 case X86::VPBROADCASTQZ128rmk:
1867 Opc = X86::VPBLENDMQZ128rmbk;
1869 case X86::VPBROADCASTQZ256rmk:
1870 Opc = X86::VPBLENDMQZ256rmbk;
1872 case X86::VPBROADCASTQZrmk:
1873 Opc = X86::VPBLENDMQZrmbk;
1879 .
add(
MI.getOperand(2))
1881 .
add(
MI.getOperand(3))
1882 .
add(
MI.getOperand(4))
1883 .
add(
MI.getOperand(5))
1884 .
add(
MI.getOperand(6))
1885 .
add(
MI.getOperand(7));
1890 case X86::VMOVDQU8Z128rrk:
1891 case X86::VMOVDQU8Z256rrk:
1892 case X86::VMOVDQU8Zrrk:
1893 case X86::VMOVDQU16Z128rrk:
1894 case X86::VMOVDQU16Z256rrk:
1895 case X86::VMOVDQU16Zrrk:
1896 case X86::VMOVDQU32Z128rrk:
1897 case X86::VMOVDQA32Z128rrk:
1898 case X86::VMOVDQU32Z256rrk:
1899 case X86::VMOVDQA32Z256rrk:
1900 case X86::VMOVDQU32Zrrk:
1901 case X86::VMOVDQA32Zrrk:
1902 case X86::VMOVDQU64Z128rrk:
1903 case X86::VMOVDQA64Z128rrk:
1904 case X86::VMOVDQU64Z256rrk:
1905 case X86::VMOVDQA64Z256rrk:
1906 case X86::VMOVDQU64Zrrk:
1907 case X86::VMOVDQA64Zrrk:
1908 case X86::VMOVUPDZ128rrk:
1909 case X86::VMOVAPDZ128rrk:
1910 case X86::VMOVUPDZ256rrk:
1911 case X86::VMOVAPDZ256rrk:
1912 case X86::VMOVUPDZrrk:
1913 case X86::VMOVAPDZrrk:
1914 case X86::VMOVUPSZ128rrk:
1915 case X86::VMOVAPSZ128rrk:
1916 case X86::VMOVUPSZ256rrk:
1917 case X86::VMOVAPSZ256rrk:
1918 case X86::VMOVUPSZrrk:
1919 case X86::VMOVAPSZrrk: {
1924 case X86::VMOVDQU8Z128rrk:
1925 Opc = X86::VPBLENDMBZ128rrk;
1927 case X86::VMOVDQU8Z256rrk:
1928 Opc = X86::VPBLENDMBZ256rrk;
1930 case X86::VMOVDQU8Zrrk:
1931 Opc = X86::VPBLENDMBZrrk;
1933 case X86::VMOVDQU16Z128rrk:
1934 Opc = X86::VPBLENDMWZ128rrk;
1936 case X86::VMOVDQU16Z256rrk:
1937 Opc = X86::VPBLENDMWZ256rrk;
1939 case X86::VMOVDQU16Zrrk:
1940 Opc = X86::VPBLENDMWZrrk;
1942 case X86::VMOVDQU32Z128rrk:
1943 Opc = X86::VPBLENDMDZ128rrk;
1945 case X86::VMOVDQU32Z256rrk:
1946 Opc = X86::VPBLENDMDZ256rrk;
1948 case X86::VMOVDQU32Zrrk:
1949 Opc = X86::VPBLENDMDZrrk;
1951 case X86::VMOVDQU64Z128rrk:
1952 Opc = X86::VPBLENDMQZ128rrk;
1954 case X86::VMOVDQU64Z256rrk:
1955 Opc = X86::VPBLENDMQZ256rrk;
1957 case X86::VMOVDQU64Zrrk:
1958 Opc = X86::VPBLENDMQZrrk;
1960 case X86::VMOVUPDZ128rrk:
1961 Opc = X86::VBLENDMPDZ128rrk;
1963 case X86::VMOVUPDZ256rrk:
1964 Opc = X86::VBLENDMPDZ256rrk;
1966 case X86::VMOVUPDZrrk:
1967 Opc = X86::VBLENDMPDZrrk;
1969 case X86::VMOVUPSZ128rrk:
1970 Opc = X86::VBLENDMPSZ128rrk;
1972 case X86::VMOVUPSZ256rrk:
1973 Opc = X86::VBLENDMPSZ256rrk;
1975 case X86::VMOVUPSZrrk:
1976 Opc = X86::VBLENDMPSZrrk;
1978 case X86::VMOVDQA32Z128rrk:
1979 Opc = X86::VPBLENDMDZ128rrk;
1981 case X86::VMOVDQA32Z256rrk:
1982 Opc = X86::VPBLENDMDZ256rrk;
1984 case X86::VMOVDQA32Zrrk:
1985 Opc = X86::VPBLENDMDZrrk;
1987 case X86::VMOVDQA64Z128rrk:
1988 Opc = X86::VPBLENDMQZ128rrk;
1990 case X86::VMOVDQA64Z256rrk:
1991 Opc = X86::VPBLENDMQZ256rrk;
1993 case X86::VMOVDQA64Zrrk:
1994 Opc = X86::VPBLENDMQZrrk;
1996 case X86::VMOVAPDZ128rrk:
1997 Opc = X86::VBLENDMPDZ128rrk;
1999 case X86::VMOVAPDZ256rrk:
2000 Opc = X86::VBLENDMPDZ256rrk;
2002 case X86::VMOVAPDZrrk:
2003 Opc = X86::VBLENDMPDZrrk;
2005 case X86::VMOVAPSZ128rrk:
2006 Opc = X86::VBLENDMPSZ128rrk;
2008 case X86::VMOVAPSZ256rrk:
2009 Opc = X86::VBLENDMPSZ256rrk;
2011 case X86::VMOVAPSZrrk:
2012 Opc = X86::VBLENDMPSZrrk;
2018 .
add(
MI.getOperand(2))
2020 .
add(
MI.getOperand(3));
2031 for (
unsigned I = 0;
I < NumRegOperands; ++
I) {
2033 if (
Op.isReg() && (
Op.isDead() ||
Op.isKill()))
2039 MBB.insert(
MI.getIterator(), NewMI);
2060 unsigned SrcOpIdx2) {
2062 if (SrcOpIdx1 > SrcOpIdx2)
2065 unsigned Op1 = 1, Op2 = 2, Op3 = 3;
2071 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
2073 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
2075 if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
2084 unsigned Opc =
MI.getOpcode();
2093 "Intrinsic instructions can't commute operand 1");
2098 assert(Case < 3 &&
"Unexpected case number!");
2103 const unsigned Form132Index = 0;
2104 const unsigned Form213Index = 1;
2105 const unsigned Form231Index = 2;
2106 static const unsigned FormMapping[][3] = {
2111 {Form231Index, Form213Index, Form132Index},
2116 {Form132Index, Form231Index, Form213Index},
2121 {Form213Index, Form132Index, Form231Index}};
2123 unsigned FMAForms[3];
2129 for (
unsigned FormIndex = 0; FormIndex < 3; FormIndex++)
2130 if (
Opc == FMAForms[FormIndex])
2131 return FMAForms[FormMapping[Case][FormIndex]];
2137 unsigned SrcOpIdx2) {
2141 assert(Case < 3 &&
"Unexpected case value!");
2144 static const uint8_t SwapMasks[3][4] = {
2145 {0x04, 0x10, 0x08, 0x20},
2146 {0x02, 0x10, 0x08, 0x40},
2147 {0x02, 0x04, 0x20, 0x40},
2150 uint8_t Imm =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
2152 uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
2153 SwapMasks[Case][2] | SwapMasks[Case][3]);
2155 if (Imm & SwapMasks[Case][0])
2156 NewImm |= SwapMasks[Case][1];
2157 if (Imm & SwapMasks[Case][1])
2158 NewImm |= SwapMasks[Case][0];
2159 if (Imm & SwapMasks[Case][2])
2160 NewImm |= SwapMasks[Case][3];
2161 if (Imm & SwapMasks[Case][3])
2162 NewImm |= SwapMasks[Case][2];
2163 MI.getOperand(
MI.getNumOperands() - 1).setImm(NewImm);
2169#define VPERM_CASES(Suffix) \
2170 case X86::VPERMI2##Suffix##Z128rr: \
2171 case X86::VPERMT2##Suffix##Z128rr: \
2172 case X86::VPERMI2##Suffix##Z256rr: \
2173 case X86::VPERMT2##Suffix##Z256rr: \
2174 case X86::VPERMI2##Suffix##Zrr: \
2175 case X86::VPERMT2##Suffix##Zrr: \
2176 case X86::VPERMI2##Suffix##Z128rm: \
2177 case X86::VPERMT2##Suffix##Z128rm: \
2178 case X86::VPERMI2##Suffix##Z256rm: \
2179 case X86::VPERMT2##Suffix##Z256rm: \
2180 case X86::VPERMI2##Suffix##Zrm: \
2181 case X86::VPERMT2##Suffix##Zrm: \
2182 case X86::VPERMI2##Suffix##Z128rrkz: \
2183 case X86::VPERMT2##Suffix##Z128rrkz: \
2184 case X86::VPERMI2##Suffix##Z256rrkz: \
2185 case X86::VPERMT2##Suffix##Z256rrkz: \
2186 case X86::VPERMI2##Suffix##Zrrkz: \
2187 case X86::VPERMT2##Suffix##Zrrkz: \
2188 case X86::VPERMI2##Suffix##Z128rmkz: \
2189 case X86::VPERMT2##Suffix##Z128rmkz: \
2190 case X86::VPERMI2##Suffix##Z256rmkz: \
2191 case X86::VPERMT2##Suffix##Z256rmkz: \
2192 case X86::VPERMI2##Suffix##Zrmkz: \
2193 case X86::VPERMT2##Suffix##Zrmkz:
2195#define VPERM_CASES_BROADCAST(Suffix) \
2196 VPERM_CASES(Suffix) \
2197 case X86::VPERMI2##Suffix##Z128rmb: \
2198 case X86::VPERMT2##Suffix##Z128rmb: \
2199 case X86::VPERMI2##Suffix##Z256rmb: \
2200 case X86::VPERMT2##Suffix##Z256rmb: \
2201 case X86::VPERMI2##Suffix##Zrmb: \
2202 case X86::VPERMT2##Suffix##Zrmb: \
2203 case X86::VPERMI2##Suffix##Z128rmbkz: \
2204 case X86::VPERMT2##Suffix##Z128rmbkz: \
2205 case X86::VPERMI2##Suffix##Z256rmbkz: \
2206 case X86::VPERMT2##Suffix##Z256rmbkz: \
2207 case X86::VPERMI2##Suffix##Zrmbkz: \
2208 case X86::VPERMT2##Suffix##Zrmbkz:
2221#undef VPERM_CASES_BROADCAST
2228#define VPERM_CASES(Orig, New) \
2229 case X86::Orig##Z128rr: \
2230 return X86::New##Z128rr; \
2231 case X86::Orig##Z128rrkz: \
2232 return X86::New##Z128rrkz; \
2233 case X86::Orig##Z128rm: \
2234 return X86::New##Z128rm; \
2235 case X86::Orig##Z128rmkz: \
2236 return X86::New##Z128rmkz; \
2237 case X86::Orig##Z256rr: \
2238 return X86::New##Z256rr; \
2239 case X86::Orig##Z256rrkz: \
2240 return X86::New##Z256rrkz; \
2241 case X86::Orig##Z256rm: \
2242 return X86::New##Z256rm; \
2243 case X86::Orig##Z256rmkz: \
2244 return X86::New##Z256rmkz; \
2245 case X86::Orig##Zrr: \
2246 return X86::New##Zrr; \
2247 case X86::Orig##Zrrkz: \
2248 return X86::New##Zrrkz; \
2249 case X86::Orig##Zrm: \
2250 return X86::New##Zrm; \
2251 case X86::Orig##Zrmkz: \
2252 return X86::New##Zrmkz;
2254#define VPERM_CASES_BROADCAST(Orig, New) \
2255 VPERM_CASES(Orig, New) \
2256 case X86::Orig##Z128rmb: \
2257 return X86::New##Z128rmb; \
2258 case X86::Orig##Z128rmbkz: \
2259 return X86::New##Z128rmbkz; \
2260 case X86::Orig##Z256rmb: \
2261 return X86::New##Z256rmb; \
2262 case X86::Orig##Z256rmbkz: \
2263 return X86::New##Z256rmbkz; \
2264 case X86::Orig##Zrmb: \
2265 return X86::New##Zrmb; \
2266 case X86::Orig##Zrmbkz: \
2267 return X86::New##Zrmbkz;
2285#undef VPERM_CASES_BROADCAST
2291 unsigned OpIdx2)
const {
2293 return std::exchange(NewMI,
false)
2294 ?
MI.getParent()->getParent()->CloneMachineInstr(&
MI)
2298 unsigned Opc =
MI.getOpcode();
2300#define CASE_ND(OP) \
2316#define FROM_TO_SIZE(A, B, S) \
2322 Opc = X86::B##_ND; \
2330 Opc = X86::A##_ND; \
2339 WorkingMI = CloneIfNew(
MI);
2348 WorkingMI = CloneIfNew(
MI);
2350 get(X86::PFSUBRrr ==
Opc ? X86::PFSUBrr : X86::PFSUBRrr));
2352 case X86::BLENDPDrri:
2353 case X86::BLENDPSrri:
2354 case X86::PBLENDWrri:
2355 case X86::VBLENDPDrri:
2356 case X86::VBLENDPSrri:
2357 case X86::VBLENDPDYrri:
2358 case X86::VBLENDPSYrri:
2359 case X86::VPBLENDDrri:
2360 case X86::VPBLENDWrri:
2361 case X86::VPBLENDDYrri:
2362 case X86::VPBLENDWYrri: {
2367 case X86::BLENDPDrri:
2368 Mask = (int8_t)0x03;
2370 case X86::BLENDPSrri:
2371 Mask = (int8_t)0x0F;
2373 case X86::PBLENDWrri:
2374 Mask = (int8_t)0xFF;
2376 case X86::VBLENDPDrri:
2377 Mask = (int8_t)0x03;
2379 case X86::VBLENDPSrri:
2380 Mask = (int8_t)0x0F;
2382 case X86::VBLENDPDYrri:
2383 Mask = (int8_t)0x0F;
2385 case X86::VBLENDPSYrri:
2386 Mask = (int8_t)0xFF;
2388 case X86::VPBLENDDrri:
2389 Mask = (int8_t)0x0F;
2391 case X86::VPBLENDWrri:
2392 Mask = (int8_t)0xFF;
2394 case X86::VPBLENDDYrri:
2395 Mask = (int8_t)0xFF;
2397 case X86::VPBLENDWYrri:
2398 Mask = (int8_t)0xFF;
2404 int8_t Imm =
MI.getOperand(3).getImm() & Mask;
2405 WorkingMI = CloneIfNew(
MI);
2409 case X86::INSERTPSrri:
2410 case X86::VINSERTPSrri:
2411 case X86::VINSERTPSZrri: {
2412 unsigned Imm =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
2413 unsigned ZMask = Imm & 15;
2414 unsigned DstIdx = (Imm >> 4) & 3;
2415 unsigned SrcIdx = (Imm >> 6) & 3;
2419 if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2422 assert(AltIdx < 4 &&
"Illegal insertion index");
2423 unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2424 WorkingMI = CloneIfNew(
MI);
2433 case X86::VMOVSSrr: {
2435 if (Subtarget.hasSSE41()) {
2441 Opc = X86::BLENDPDrri;
2445 Opc = X86::BLENDPSrri;
2449 Opc = X86::VBLENDPDrri;
2453 Opc = X86::VBLENDPSrri;
2458 WorkingMI = CloneIfNew(
MI);
2464 assert(
Opc == X86::MOVSDrr &&
"Only MOVSD can commute to SHUFPD");
2465 WorkingMI = CloneIfNew(
MI);
2470 case X86::SHUFPDrri: {
2472 assert(
MI.getOperand(3).getImm() == 0x02 &&
"Unexpected immediate!");
2473 WorkingMI = CloneIfNew(
MI);
2478 case X86::PCLMULQDQrri:
2479 case X86::VPCLMULQDQrri:
2480 case X86::VPCLMULQDQYrri:
2481 case X86::VPCLMULQDQZrri:
2482 case X86::VPCLMULQDQZ128rri:
2483 case X86::VPCLMULQDQZ256rri: {
2486 unsigned Imm =
MI.getOperand(3).getImm();
2487 unsigned Src1Hi = Imm & 0x01;
2488 unsigned Src2Hi = Imm & 0x10;
2489 WorkingMI = CloneIfNew(
MI);
2493 case X86::VPCMPBZ128rri:
2494 case X86::VPCMPUBZ128rri:
2495 case X86::VPCMPBZ256rri:
2496 case X86::VPCMPUBZ256rri:
2497 case X86::VPCMPBZrri:
2498 case X86::VPCMPUBZrri:
2499 case X86::VPCMPDZ128rri:
2500 case X86::VPCMPUDZ128rri:
2501 case X86::VPCMPDZ256rri:
2502 case X86::VPCMPUDZ256rri:
2503 case X86::VPCMPDZrri:
2504 case X86::VPCMPUDZrri:
2505 case X86::VPCMPQZ128rri:
2506 case X86::VPCMPUQZ128rri:
2507 case X86::VPCMPQZ256rri:
2508 case X86::VPCMPUQZ256rri:
2509 case X86::VPCMPQZrri:
2510 case X86::VPCMPUQZrri:
2511 case X86::VPCMPWZ128rri:
2512 case X86::VPCMPUWZ128rri:
2513 case X86::VPCMPWZ256rri:
2514 case X86::VPCMPUWZ256rri:
2515 case X86::VPCMPWZrri:
2516 case X86::VPCMPUWZrri:
2517 case X86::VPCMPBZ128rrik:
2518 case X86::VPCMPUBZ128rrik:
2519 case X86::VPCMPBZ256rrik:
2520 case X86::VPCMPUBZ256rrik:
2521 case X86::VPCMPBZrrik:
2522 case X86::VPCMPUBZrrik:
2523 case X86::VPCMPDZ128rrik:
2524 case X86::VPCMPUDZ128rrik:
2525 case X86::VPCMPDZ256rrik:
2526 case X86::VPCMPUDZ256rrik:
2527 case X86::VPCMPDZrrik:
2528 case X86::VPCMPUDZrrik:
2529 case X86::VPCMPQZ128rrik:
2530 case X86::VPCMPUQZ128rrik:
2531 case X86::VPCMPQZ256rrik:
2532 case X86::VPCMPUQZ256rrik:
2533 case X86::VPCMPQZrrik:
2534 case X86::VPCMPUQZrrik:
2535 case X86::VPCMPWZ128rrik:
2536 case X86::VPCMPUWZ128rrik:
2537 case X86::VPCMPWZ256rrik:
2538 case X86::VPCMPUWZ256rrik:
2539 case X86::VPCMPWZrrik:
2540 case X86::VPCMPUWZrrik:
2541 WorkingMI = CloneIfNew(
MI);
2545 MI.getOperand(
MI.getNumOperands() - 1).getImm() & 0x7));
2548 case X86::VPCOMUBri:
2550 case X86::VPCOMUDri:
2552 case X86::VPCOMUQri:
2554 case X86::VPCOMUWri:
2555 WorkingMI = CloneIfNew(
MI);
2560 case X86::VCMPSDZrri:
2561 case X86::VCMPSSZrri:
2562 case X86::VCMPPDZrri:
2563 case X86::VCMPPSZrri:
2564 case X86::VCMPSHZrri:
2565 case X86::VCMPPHZrri:
2566 case X86::VCMPPHZ128rri:
2567 case X86::VCMPPHZ256rri:
2568 case X86::VCMPPDZ128rri:
2569 case X86::VCMPPSZ128rri:
2570 case X86::VCMPPDZ256rri:
2571 case X86::VCMPPSZ256rri:
2572 case X86::VCMPPDZrrik:
2573 case X86::VCMPPSZrrik:
2574 case X86::VCMPPHZrrik:
2575 case X86::VCMPPDZ128rrik:
2576 case X86::VCMPPSZ128rrik:
2577 case X86::VCMPPHZ128rrik:
2578 case X86::VCMPPDZ256rrik:
2579 case X86::VCMPPSZ256rrik:
2580 case X86::VCMPPHZ256rrik:
2581 WorkingMI = CloneIfNew(
MI);
2584 MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
2586 case X86::VPERM2F128rri:
2587 case X86::VPERM2I128rri:
2591 WorkingMI = CloneIfNew(
MI);
2594 case X86::MOVHLPSrr:
2595 case X86::UNPCKHPDrr:
2596 case X86::VMOVHLPSrr:
2597 case X86::VUNPCKHPDrr:
2598 case X86::VMOVHLPSZrr:
2599 case X86::VUNPCKHPDZ128rr:
2600 assert(Subtarget.hasSSE2() &&
"Commuting MOVHLP/UNPCKHPD requires SSE2!");
2605 case X86::MOVHLPSrr:
2606 Opc = X86::UNPCKHPDrr;
2608 case X86::UNPCKHPDrr:
2609 Opc = X86::MOVHLPSrr;
2611 case X86::VMOVHLPSrr:
2612 Opc = X86::VUNPCKHPDrr;
2614 case X86::VUNPCKHPDrr:
2615 Opc = X86::VMOVHLPSrr;
2617 case X86::VMOVHLPSZrr:
2618 Opc = X86::VUNPCKHPDZ128rr;
2620 case X86::VUNPCKHPDZ128rr:
2621 Opc = X86::VMOVHLPSZrr;
2624 WorkingMI = CloneIfNew(
MI);
2630 WorkingMI = CloneIfNew(
MI);
2631 unsigned OpNo =
MI.getDesc().getNumOperands() - 1;
2636 case X86::VPTERNLOGDZrri:
2637 case X86::VPTERNLOGDZrmi:
2638 case X86::VPTERNLOGDZ128rri:
2639 case X86::VPTERNLOGDZ128rmi:
2640 case X86::VPTERNLOGDZ256rri:
2641 case X86::VPTERNLOGDZ256rmi:
2642 case X86::VPTERNLOGQZrri:
2643 case X86::VPTERNLOGQZrmi:
2644 case X86::VPTERNLOGQZ128rri:
2645 case X86::VPTERNLOGQZ128rmi:
2646 case X86::VPTERNLOGQZ256rri:
2647 case X86::VPTERNLOGQZ256rmi:
2648 case X86::VPTERNLOGDZrrik:
2649 case X86::VPTERNLOGDZ128rrik:
2650 case X86::VPTERNLOGDZ256rrik:
2651 case X86::VPTERNLOGQZrrik:
2652 case X86::VPTERNLOGQZ128rrik:
2653 case X86::VPTERNLOGQZ256rrik:
2654 case X86::VPTERNLOGDZrrikz:
2655 case X86::VPTERNLOGDZrmikz:
2656 case X86::VPTERNLOGDZ128rrikz:
2657 case X86::VPTERNLOGDZ128rmikz:
2658 case X86::VPTERNLOGDZ256rrikz:
2659 case X86::VPTERNLOGDZ256rmikz:
2660 case X86::VPTERNLOGQZrrikz:
2661 case X86::VPTERNLOGQZrmikz:
2662 case X86::VPTERNLOGQZ128rrikz:
2663 case X86::VPTERNLOGQZ128rmikz:
2664 case X86::VPTERNLOGQZ256rrikz:
2665 case X86::VPTERNLOGQZ256rmikz:
2666 case X86::VPTERNLOGDZ128rmbi:
2667 case X86::VPTERNLOGDZ256rmbi:
2668 case X86::VPTERNLOGDZrmbi:
2669 case X86::VPTERNLOGQZ128rmbi:
2670 case X86::VPTERNLOGQZ256rmbi:
2671 case X86::VPTERNLOGQZrmbi:
2672 case X86::VPTERNLOGDZ128rmbikz:
2673 case X86::VPTERNLOGDZ256rmbikz:
2674 case X86::VPTERNLOGDZrmbikz:
2675 case X86::VPTERNLOGQZ128rmbikz:
2676 case X86::VPTERNLOGQZ256rmbikz:
2677 case X86::VPTERNLOGQZrmbikz: {
2678 WorkingMI = CloneIfNew(
MI);
2684 WorkingMI = CloneIfNew(
MI);
2690 WorkingMI = CloneIfNew(
MI);
2699bool X86InstrInfo::findThreeSrcCommutedOpIndices(
const MachineInstr &
MI,
2700 unsigned &SrcOpIdx1,
2701 unsigned &SrcOpIdx2,
2702 bool IsIntrinsic)
const {
2705 unsigned FirstCommutableVecOp = 1;
2706 unsigned LastCommutableVecOp = 3;
2707 unsigned KMaskOp = -1U;
2730 FirstCommutableVecOp = 3;
2732 LastCommutableVecOp++;
2733 }
else if (IsIntrinsic) {
2736 FirstCommutableVecOp = 2;
2739 if (
isMem(
MI, LastCommutableVecOp))
2740 LastCommutableVecOp--;
2745 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2746 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2747 SrcOpIdx1 == KMaskOp))
2749 if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2750 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2751 SrcOpIdx2 == KMaskOp))
2756 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2757 SrcOpIdx2 == CommuteAnyOperandIndex) {
2758 unsigned CommutableOpIdx2 = SrcOpIdx2;
2762 if (SrcOpIdx1 == SrcOpIdx2)
2765 CommutableOpIdx2 = LastCommutableVecOp;
2766 else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2768 CommutableOpIdx2 = SrcOpIdx1;
2772 Register Op2Reg =
MI.getOperand(CommutableOpIdx2).getReg();
2774 unsigned CommutableOpIdx1;
2775 for (CommutableOpIdx1 = LastCommutableVecOp;
2776 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2778 if (CommutableOpIdx1 == KMaskOp)
2784 if (Op2Reg !=
MI.getOperand(CommutableOpIdx1).getReg())
2789 if (CommutableOpIdx1 < FirstCommutableVecOp)
2794 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2803 unsigned &SrcOpIdx1,
2804 unsigned &SrcOpIdx2)
const {
2806 if (!
Desc.isCommutable())
2809 switch (
MI.getOpcode()) {
2814 case X86::VCMPSDrri:
2815 case X86::VCMPSSrri:
2816 case X86::VCMPPDrri:
2817 case X86::VCMPPSrri:
2818 case X86::VCMPPDYrri:
2819 case X86::VCMPPSYrri:
2820 case X86::VCMPSDZrri:
2821 case X86::VCMPSSZrri:
2822 case X86::VCMPPDZrri:
2823 case X86::VCMPPSZrri:
2824 case X86::VCMPSHZrri:
2825 case X86::VCMPPHZrri:
2826 case X86::VCMPPHZ128rri:
2827 case X86::VCMPPHZ256rri:
2828 case X86::VCMPPDZ128rri:
2829 case X86::VCMPPSZ128rri:
2830 case X86::VCMPPDZ256rri:
2831 case X86::VCMPPSZ256rri:
2832 case X86::VCMPPDZrrik:
2833 case X86::VCMPPSZrrik:
2834 case X86::VCMPPHZrrik:
2835 case X86::VCMPPDZ128rrik:
2836 case X86::VCMPPSZ128rrik:
2837 case X86::VCMPPHZ128rrik:
2838 case X86::VCMPPDZ256rrik:
2839 case X86::VCMPPSZ256rrik:
2840 case X86::VCMPPHZ256rrik: {
2845 unsigned Imm =
MI.getOperand(3 + OpOffset).getImm() & 0x7;
2862 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2869 if (Subtarget.hasSSE41())
2872 case X86::SHUFPDrri:
2874 if (
MI.getOperand(3).getImm() == 0x02)
2877 case X86::MOVHLPSrr:
2878 case X86::UNPCKHPDrr:
2879 case X86::VMOVHLPSrr:
2880 case X86::VUNPCKHPDrr:
2881 case X86::VMOVHLPSZrr:
2882 case X86::VUNPCKHPDZ128rr:
2883 if (Subtarget.hasSSE2())
2886 case X86::VPTERNLOGDZrri:
2887 case X86::VPTERNLOGDZrmi:
2888 case X86::VPTERNLOGDZ128rri:
2889 case X86::VPTERNLOGDZ128rmi:
2890 case X86::VPTERNLOGDZ256rri:
2891 case X86::VPTERNLOGDZ256rmi:
2892 case X86::VPTERNLOGQZrri:
2893 case X86::VPTERNLOGQZrmi:
2894 case X86::VPTERNLOGQZ128rri:
2895 case X86::VPTERNLOGQZ128rmi:
2896 case X86::VPTERNLOGQZ256rri:
2897 case X86::VPTERNLOGQZ256rmi:
2898 case X86::VPTERNLOGDZrrik:
2899 case X86::VPTERNLOGDZ128rrik:
2900 case X86::VPTERNLOGDZ256rrik:
2901 case X86::VPTERNLOGQZrrik:
2902 case X86::VPTERNLOGQZ128rrik:
2903 case X86::VPTERNLOGQZ256rrik:
2904 case X86::VPTERNLOGDZrrikz:
2905 case X86::VPTERNLOGDZrmikz:
2906 case X86::VPTERNLOGDZ128rrikz:
2907 case X86::VPTERNLOGDZ128rmikz:
2908 case X86::VPTERNLOGDZ256rrikz:
2909 case X86::VPTERNLOGDZ256rmikz:
2910 case X86::VPTERNLOGQZrrikz:
2911 case X86::VPTERNLOGQZrmikz:
2912 case X86::VPTERNLOGQZ128rrikz:
2913 case X86::VPTERNLOGQZ128rmikz:
2914 case X86::VPTERNLOGQZ256rrikz:
2915 case X86::VPTERNLOGQZ256rmikz:
2916 case X86::VPTERNLOGDZ128rmbi:
2917 case X86::VPTERNLOGDZ256rmbi:
2918 case X86::VPTERNLOGDZrmbi:
2919 case X86::VPTERNLOGQZ128rmbi:
2920 case X86::VPTERNLOGQZ256rmbi:
2921 case X86::VPTERNLOGQZrmbi:
2922 case X86::VPTERNLOGDZ128rmbikz:
2923 case X86::VPTERNLOGDZ256rmbikz:
2924 case X86::VPTERNLOGDZrmbikz:
2925 case X86::VPTERNLOGQZ128rmbikz:
2926 case X86::VPTERNLOGQZ256rmbikz:
2927 case X86::VPTERNLOGQZrmbikz:
2928 return findThreeSrcCommutedOpIndices(
MI, SrcOpIdx1, SrcOpIdx2);
2929 case X86::VPDPWSSDYrr:
2930 case X86::VPDPWSSDrr:
2931 case X86::VPDPWSSDSYrr:
2932 case X86::VPDPWSSDSrr:
2933 case X86::VPDPWUUDrr:
2934 case X86::VPDPWUUDYrr:
2935 case X86::VPDPWUUDSrr:
2936 case X86::VPDPWUUDSYrr:
2937 case X86::VPDPBSSDSrr:
2938 case X86::VPDPBSSDSYrr:
2939 case X86::VPDPBSSDrr:
2940 case X86::VPDPBSSDYrr:
2941 case X86::VPDPBUUDSrr:
2942 case X86::VPDPBUUDSYrr:
2943 case X86::VPDPBUUDrr:
2944 case X86::VPDPBUUDYrr:
2945 case X86::VPDPBSSDSZ128rr:
2946 case X86::VPDPBSSDSZ128rrk:
2947 case X86::VPDPBSSDSZ128rrkz:
2948 case X86::VPDPBSSDSZ256rr:
2949 case X86::VPDPBSSDSZ256rrk:
2950 case X86::VPDPBSSDSZ256rrkz:
2951 case X86::VPDPBSSDSZrr:
2952 case X86::VPDPBSSDSZrrk:
2953 case X86::VPDPBSSDSZrrkz:
2954 case X86::VPDPBSSDZ128rr:
2955 case X86::VPDPBSSDZ128rrk:
2956 case X86::VPDPBSSDZ128rrkz:
2957 case X86::VPDPBSSDZ256rr:
2958 case X86::VPDPBSSDZ256rrk:
2959 case X86::VPDPBSSDZ256rrkz:
2960 case X86::VPDPBSSDZrr:
2961 case X86::VPDPBSSDZrrk:
2962 case X86::VPDPBSSDZrrkz:
2963 case X86::VPDPBUUDSZ128rr:
2964 case X86::VPDPBUUDSZ128rrk:
2965 case X86::VPDPBUUDSZ128rrkz:
2966 case X86::VPDPBUUDSZ256rr:
2967 case X86::VPDPBUUDSZ256rrk:
2968 case X86::VPDPBUUDSZ256rrkz:
2969 case X86::VPDPBUUDSZrr:
2970 case X86::VPDPBUUDSZrrk:
2971 case X86::VPDPBUUDSZrrkz:
2972 case X86::VPDPBUUDZ128rr:
2973 case X86::VPDPBUUDZ128rrk:
2974 case X86::VPDPBUUDZ128rrkz:
2975 case X86::VPDPBUUDZ256rr:
2976 case X86::VPDPBUUDZ256rrk:
2977 case X86::VPDPBUUDZ256rrkz:
2978 case X86::VPDPBUUDZrr:
2979 case X86::VPDPBUUDZrrk:
2980 case X86::VPDPBUUDZrrkz:
2981 case X86::VPDPWSSDZ128rr:
2982 case X86::VPDPWSSDZ128rrk:
2983 case X86::VPDPWSSDZ128rrkz:
2984 case X86::VPDPWSSDZ256rr:
2985 case X86::VPDPWSSDZ256rrk:
2986 case X86::VPDPWSSDZ256rrkz:
2987 case X86::VPDPWSSDZrr:
2988 case X86::VPDPWSSDZrrk:
2989 case X86::VPDPWSSDZrrkz:
2990 case X86::VPDPWSSDSZ128rr:
2991 case X86::VPDPWSSDSZ128rrk:
2992 case X86::VPDPWSSDSZ128rrkz:
2993 case X86::VPDPWSSDSZ256rr:
2994 case X86::VPDPWSSDSZ256rrk:
2995 case X86::VPDPWSSDSZ256rrkz:
2996 case X86::VPDPWSSDSZrr:
2997 case X86::VPDPWSSDSZrrk:
2998 case X86::VPDPWSSDSZrrkz:
2999 case X86::VPDPWUUDZ128rr:
3000 case X86::VPDPWUUDZ128rrk:
3001 case X86::VPDPWUUDZ128rrkz:
3002 case X86::VPDPWUUDZ256rr:
3003 case X86::VPDPWUUDZ256rrk:
3004 case X86::VPDPWUUDZ256rrkz:
3005 case X86::VPDPWUUDZrr:
3006 case X86::VPDPWUUDZrrk:
3007 case X86::VPDPWUUDZrrkz:
3008 case X86::VPDPWUUDSZ128rr:
3009 case X86::VPDPWUUDSZ128rrk:
3010 case X86::VPDPWUUDSZ128rrkz:
3011 case X86::VPDPWUUDSZ256rr:
3012 case X86::VPDPWUUDSZ256rrk:
3013 case X86::VPDPWUUDSZ256rrkz:
3014 case X86::VPDPWUUDSZrr:
3015 case X86::VPDPWUUDSZrrk:
3016 case X86::VPDPWUUDSZrrkz:
3017 case X86::VPMADD52HUQrr:
3018 case X86::VPMADD52HUQYrr:
3019 case X86::VPMADD52HUQZ128r:
3020 case X86::VPMADD52HUQZ128rk:
3021 case X86::VPMADD52HUQZ128rkz:
3022 case X86::VPMADD52HUQZ256r:
3023 case X86::VPMADD52HUQZ256rk:
3024 case X86::VPMADD52HUQZ256rkz:
3025 case X86::VPMADD52HUQZr:
3026 case X86::VPMADD52HUQZrk:
3027 case X86::VPMADD52HUQZrkz:
3028 case X86::VPMADD52LUQrr:
3029 case X86::VPMADD52LUQYrr:
3030 case X86::VPMADD52LUQZ128r:
3031 case X86::VPMADD52LUQZ128rk:
3032 case X86::VPMADD52LUQZ128rkz:
3033 case X86::VPMADD52LUQZ256r:
3034 case X86::VPMADD52LUQZ256rk:
3035 case X86::VPMADD52LUQZ256rkz:
3036 case X86::VPMADD52LUQZr:
3037 case X86::VPMADD52LUQZrk:
3038 case X86::VPMADD52LUQZrkz:
3039 case X86::VFMADDCPHZr:
3040 case X86::VFMADDCPHZrk:
3041 case X86::VFMADDCPHZrkz:
3042 case X86::VFMADDCPHZ128r:
3043 case X86::VFMADDCPHZ128rk:
3044 case X86::VFMADDCPHZ128rkz:
3045 case X86::VFMADDCPHZ256r:
3046 case X86::VFMADDCPHZ256rk:
3047 case X86::VFMADDCPHZ256rkz:
3048 case X86::VFMADDCSHZr:
3049 case X86::VFMADDCSHZrk:
3050 case X86::VFMADDCSHZrkz: {
3051 unsigned CommutableOpIdx1 = 2;
3052 unsigned CommutableOpIdx2 = 3;
3058 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3061 if (!
MI.getOperand(SrcOpIdx1).isReg() || !
MI.getOperand(SrcOpIdx2).isReg())
3071 return findThreeSrcCommutedOpIndices(
MI, SrcOpIdx1, SrcOpIdx2,
3078 unsigned CommutableOpIdx1 =
Desc.getNumDefs() + 1;
3079 unsigned CommutableOpIdx2 =
Desc.getNumDefs() + 2;
3082 if ((
MI.getDesc().getOperandConstraint(
Desc.getNumDefs(),
3097 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3101 if (!
MI.getOperand(SrcOpIdx1).isReg() ||
3102 !
MI.getOperand(SrcOpIdx2).isReg())
3114 unsigned Opcode =
MI->getOpcode();
3115 if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
3116 Opcode != X86::LEA64_32r)
3138 unsigned Opcode =
MI.getOpcode();
3139 if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
3166 unsigned Opcode =
MCID.getOpcode();
3167 if (!(X86::isJCC(Opcode) || X86::isSETCC(Opcode) || X86::isSETZUCC(Opcode) ||
3168 X86::isCMOVCC(Opcode) || X86::isCFCMOVCC(Opcode) ||
3169 X86::isCCMPCC(Opcode) || X86::isCTESTCC(Opcode)))
3172 unsigned NumUses =
MCID.getNumOperands() -
MCID.getNumDefs();
3181 CondNo +=
MCID.getNumDefs();
3191 return X86::isSETCC(
MI.getOpcode()) || X86::isSETZUCC(
MI.getOpcode())
3207 return X86::isCCMPCC(
MI.getOpcode()) || X86::isCTESTCC(
MI.getOpcode())
3238 enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
3269#define GET_X86_NF_TRANSFORM_TABLE
3270#define GET_X86_ND2NONND_TABLE
3271#include "X86GenInstrMapping.inc"
3276 return (
I == Table.
end() ||
I->OldOpc !=
Opc) ? 0U :
I->NewOpc;
3279#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3281 static std::atomic<bool> NFTableChecked(
false);
3282 if (!NFTableChecked.load(std::memory_order_relaxed)) {
3284 "X86NFTransformTable is not sorted!");
3285 NFTableChecked.store(
true, std::memory_order_relaxed);
3292#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3294 static std::atomic<bool> NDTableChecked(
false);
3295 if (!NDTableChecked.load(std::memory_order_relaxed)) {
3297 "X86ND2NonNDTableis not sorted!");
3298 NDTableChecked.store(
true, std::memory_order_relaxed);
3378std::pair<X86::CondCode, bool>
3381 bool NeedSwap =
false;
3382 switch (Predicate) {
3461 return std::make_pair(CC, NeedSwap);
3470#define GET_ND_IF_ENABLED(OPC) (HasNDD ? OPC##_ND : OPC)
3564 switch (Imm & 0x3) {
3582 if (Info.RegClass == X86::VR128RegClassID ||
3583 Info.RegClass == X86::VR128XRegClassID)
3585 if (Info.RegClass == X86::VR256RegClassID ||
3586 Info.RegClass == X86::VR256XRegClassID)
3588 if (Info.RegClass == X86::VR512RegClassID)
3595 return (
Reg == X86::FPCW ||
Reg == X86::FPSW ||
3596 (
Reg >= X86::ST0 &&
Reg <= X86::ST7));
3604 if (
MI.isCall() ||
MI.isInlineAsm())
3628#ifdef EXPENSIVE_CHECKS
3630 "Got false negative from X86II::getMemoryOperandNo()!");
3640#ifdef EXPENSIVE_CHECKS
3642 "Expected no operands to have OPERAND_MEMORY type!");
3651 if (IsMemOp(
Desc.operands()[
I])) {
3652#ifdef EXPENSIVE_CHECKS
3656 "Expected all five operands in the memory reference to have "
3657 "OPERAND_MEMORY type!");
3669 "Unexpected number of operands!");
3672 if (!Index.isReg() || Index.getReg() != X86::NoRegister)
3680 MI.getParent()->getParent()->getConstantPool()->getConstants();
3692 switch (
MI.getOpcode()) {
3693 case X86::TCRETURNdi:
3694 case X86::TCRETURNri:
3695 case X86::TCRETURNmi:
3696 case X86::TCRETURNdi64:
3697 case X86::TCRETURNri64:
3698 case X86::TCRETURNri64_ImpCall:
3699 case X86::TCRETURNmi64:
3718 if (Symbol ==
"__x86_indirect_thunk_r11")
3723 if (TailCall.
getOpcode() != X86::TCRETURNdi &&
3724 TailCall.
getOpcode() != X86::TCRETURNdi64) {
3729 if (Subtarget.isTargetWin64() && MF->
hasWinCFI()) {
3756 while (
I !=
MBB.begin()) {
3758 if (
I->isDebugInstr())
3761 assert(0 &&
"Can't find the branch to replace!");
3765 if (CC != BranchCond[0].
getImm())
3771 unsigned Opc = TailCall.
getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
3772 : X86::TCRETURNdi64cc;
3785 LiveRegs.stepForward(*MIB, Clobbers);
3786 for (
const auto &
C : Clobbers) {
3791 I->eraseFromParent();
3805 if (Succ->isEHPad() || (Succ ==
TBB && FallthroughBB))
3808 if (FallthroughBB && FallthroughBB !=
TBB)
3810 FallthroughBB = Succ;
3812 return FallthroughBB;
3815bool X86InstrInfo::analyzeBranchImpl(
3826 if (
I->isDebugInstr())
3831 if (!isUnpredicatedTerminator(*
I))
3840 if (
I->getOpcode() == X86::JMP_1) {
3844 TBB =
I->getOperand(0).getMBB();
3859 UnCondBrIter =
MBB.
end();
3864 TBB =
I->getOperand(0).getMBB();
3875 if (
I->findRegisterUseOperand(X86::EFLAGS,
nullptr)->isUndef())
3881 TBB =
I->getOperand(0).getMBB();
3896 if (OldBranchCode == BranchCode &&
TBB == NewTBB)
3902 if (
TBB == NewTBB &&
3935 Cond[0].setImm(BranchCode);
3946 bool AllowModify)
const {
3948 return analyzeBranchImpl(
MBB,
TBB, FBB,
Cond, CondBranches, AllowModify);
3954 assert(MemRefBegin >= 0 &&
"instr should have memory operand");
3966 if (!
Reg.isVirtual())
3971 unsigned Opcode =
MI->getOpcode();
3972 if (Opcode != X86::LEA64r && Opcode != X86::LEA32r)
3978 unsigned Opcode =
MI.getOpcode();
3981 if (Opcode == X86::JMP64m || Opcode == X86::JMP32m) {
3989 if (Opcode == X86::JMP64r || Opcode == X86::JMP32r) {
3991 if (!Reg.isVirtual())
3998 if (
Add->getOpcode() != X86::ADD64rr &&
Add->getOpcode() != X86::ADD32rr)
4011 MachineBranchPredicate &MBP,
4012 bool AllowModify)
const {
4013 using namespace std::placeholders;
4017 if (analyzeBranchImpl(
MBB, MBP.TrueDest, MBP.FalseDest,
Cond, CondBranches,
4021 if (
Cond.size() != 1)
4024 assert(MBP.TrueDest &&
"expected!");
4027 MBP.FalseDest =
MBB.getNextNode();
4032 bool SingleUseCondition =
true;
4035 if (
MI.modifiesRegister(X86::EFLAGS,
TRI)) {
4040 if (
MI.readsRegister(X86::EFLAGS,
TRI))
4041 SingleUseCondition =
false;
4047 if (SingleUseCondition) {
4048 for (
auto *Succ :
MBB.successors())
4049 if (Succ->isLiveIn(X86::EFLAGS))
4050 SingleUseCondition =
false;
4053 MBP.ConditionDef = ConditionDef;
4054 MBP.SingleUseCondition = SingleUseCondition;
4061 const unsigned TestOpcode =
4062 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
4064 if (ConditionDef->
getOpcode() == TestOpcode &&
4071 ? MachineBranchPredicate::PRED_NE
4072 : MachineBranchPredicate::PRED_EQ;
4080 int *BytesRemoved)
const {
4081 assert(!BytesRemoved &&
"code size not handled");
4086 while (
I !=
MBB.begin()) {
4088 if (
I->isDebugInstr())
4090 if (
I->getOpcode() != X86::JMP_1 &&
4094 I->eraseFromParent();
4108 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
4110 "X86 branch conditions have one component!");
4111 assert(!BytesAdded &&
"code size not handled");
4115 assert(!FBB &&
"Unconditional branch with multiple successors!");
4121 bool FallThru = FBB ==
nullptr;
4136 if (FBB ==
nullptr) {
4138 assert(FBB &&
"MBB cannot be the last block in function when the false "
4139 "body is a fall-through.");
4163 Register FalseReg,
int &CondCycles,
4164 int &TrueCycles,
int &FalseCycles)
const {
4166 if (!Subtarget.canUseCMOV())
4168 if (
Cond.size() != 1)
4177 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
4182 if (X86::GR16RegClass.hasSubClassEq(RC) ||
4183 X86::GR32RegClass.hasSubClassEq(RC) ||
4184 X86::GR64RegClass.hasSubClassEq(RC)) {
4205 assert(
Cond.size() == 1 &&
"Invalid Cond array");
4208 false , Subtarget.hasNDD());
4217 return X86::GR8_ABCD_HRegClass.contains(
Reg);
4223 bool HasAVX = Subtarget.
hasAVX();
4225 bool HasEGPR = Subtarget.hasEGPR();
4232 if (X86::VK16RegClass.
contains(SrcReg)) {
4233 if (X86::GR64RegClass.
contains(DestReg)) {
4234 assert(Subtarget.hasBWI());
4235 return HasEGPR ? X86::KMOVQrk_EVEX : X86::KMOVQrk;
4237 if (X86::GR32RegClass.
contains(DestReg))
4238 return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDrk_EVEX : X86::KMOVDrk)
4239 : (HasEGPR ? X86::KMOVWrk_EVEX : X86::KMOVWrk);
4247 if (X86::VK16RegClass.
contains(DestReg)) {
4248 if (X86::GR64RegClass.
contains(SrcReg)) {
4249 assert(Subtarget.hasBWI());
4250 return HasEGPR ? X86::KMOVQkr_EVEX : X86::KMOVQkr;
4252 if (X86::GR32RegClass.
contains(SrcReg))
4253 return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDkr_EVEX : X86::KMOVDkr)
4254 : (HasEGPR ? X86::KMOVWkr_EVEX : X86::KMOVWkr);
4262 if (X86::GR64RegClass.
contains(DestReg)) {
4263 if (X86::VR128XRegClass.
contains(SrcReg))
4265 return HasAVX512 ? X86::VMOVPQIto64Zrr
4266 : HasAVX ? X86::VMOVPQIto64rr
4267 : X86::MOVPQIto64rr;
4268 if (X86::VR64RegClass.
contains(SrcReg))
4270 return X86::MMX_MOVD64from64rr;
4271 }
else if (X86::GR64RegClass.
contains(SrcReg)) {
4273 if (X86::VR128XRegClass.
contains(DestReg))
4274 return HasAVX512 ? X86::VMOV64toPQIZrr
4275 : HasAVX ? X86::VMOV64toPQIrr
4276 : X86::MOV64toPQIrr;
4278 if (X86::VR64RegClass.
contains(DestReg))
4279 return X86::MMX_MOVD64to64rr;
4285 if (X86::GR32RegClass.
contains(DestReg) &&
4286 X86::VR128XRegClass.
contains(SrcReg))
4288 return HasAVX512 ? X86::VMOVPDI2DIZrr
4289 : HasAVX ? X86::VMOVPDI2DIrr
4292 if (X86::VR128XRegClass.
contains(DestReg) &&
4293 X86::GR32RegClass.
contains(SrcReg))
4295 return HasAVX512 ? X86::VMOVDI2PDIZrr
4296 : HasAVX ? X86::VMOVDI2PDIrr
4306 bool RenamableDest,
bool RenamableSrc)
const {
4308 bool HasAVX = Subtarget.hasAVX();
4309 bool HasVLX = Subtarget.hasVLX();
4310 bool HasEGPR = Subtarget.hasEGPR();
4312 if (X86::GR64RegClass.
contains(DestReg, SrcReg))
4314 else if (X86::GR32RegClass.
contains(DestReg, SrcReg))
4316 else if (X86::GR16RegClass.
contains(DestReg, SrcReg))
4318 else if (X86::GR8RegClass.
contains(DestReg, SrcReg)) {
4321 if ((
isHReg(DestReg) ||
isHReg(SrcReg)) && Subtarget.is64Bit()) {
4322 Opc = X86::MOV8rr_NOREX;
4325 "8-bit H register can not be copied outside GR8_NOREX");
4328 }
else if (X86::VR64RegClass.
contains(DestReg, SrcReg))
4329 Opc = X86::MMX_MOVQ64rr;
4330 else if (X86::VR128XRegClass.
contains(DestReg, SrcReg)) {
4332 Opc = X86::VMOVAPSZ128rr;
4333 else if (X86::VR128RegClass.
contains(DestReg, SrcReg))
4334 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
4338 Opc = X86::VMOVAPSZrr;
4341 TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, &X86::VR512RegClass);
4343 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4345 }
else if (X86::VR256XRegClass.
contains(DestReg, SrcReg)) {
4347 Opc = X86::VMOVAPSZ256rr;
4348 else if (X86::VR256RegClass.
contains(DestReg, SrcReg))
4349 Opc = X86::VMOVAPSYrr;
4353 Opc = X86::VMOVAPSZrr;
4356 TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, &X86::VR512RegClass);
4358 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4360 }
else if (X86::VR512RegClass.
contains(DestReg, SrcReg))
4361 Opc = X86::VMOVAPSZrr;
4364 else if (X86::VK16RegClass.
contains(DestReg, SrcReg))
4365 Opc = Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVQkk)
4366 : (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVWkk);
4377 if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
4385 LLVM_DEBUG(
dbgs() <<
"Cannot copy " << RI.getName(SrcReg) <<
" to "
4386 << RI.getName(DestReg) <<
'\n');
4390std::optional<DestSourcePair>
4392 if (
MI.isMoveReg()) {
4396 if (
MI.getOperand(0).isUndef() &&
MI.getOperand(0).getSubReg())
4397 return std::nullopt;
4401 return std::nullopt;
static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI) {
  if (STI.hasFP16())
    return Load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
  if (Load)
    return X86::MOVSHPrm;
  return X86::MOVSHPmr;
}
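// The helper below selects a spill (store) or reload (load) opcode for a
// register class. It switches on the spill size of the class and refines the
// choice by subtarget features (AVX, AVX-512, VLX, EGPR) and, for vector
// classes, by whether the stack slot is aligned enough for aligned moves.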
static unsigned getLoadStoreRegOpcode(Register Reg,
                                      const TargetRegisterClass *RC,
                                      bool IsStackAligned,
                                      const X86Subtarget &STI, bool Load) {
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();
  bool HasEGPR = STI.hasEGPR();

  assert(RC != nullptr && "Invalid target register class");
  switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
  default:
    llvm_unreachable("Unknown spill size");
  case 1:
    assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
    if (STI.is64Bit())
      // Copying to or from a physical H register on x86-64 requires a NOREX
      // move.  Otherwise use a normal move.
      if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
        return Load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
    return Load ? X86::MOV8rm : X86::MOV8mr;
  case 2:
    if (X86::VK16RegClass.hasSubClassEq(RC))
      return Load ? (HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm)
                  : (HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk);
    assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
    return Load ? X86::MOV16rm : X86::MOV16mr;
  case 4:
    if (X86::GR32RegClass.hasSubClassEq(RC))
      return Load ? X86::MOV32rm : X86::MOV32mr;
    if (X86::FR32XRegClass.hasSubClassEq(RC))
      return Load ? (HasAVX512 ? X86::VMOVSSZrm_alt
                     : HasAVX  ? X86::VMOVSSrm_alt
                               : X86::MOVSSrm_alt)
                  : (HasAVX512 ? X86::VMOVSSZmr
                     : HasAVX  ? X86::VMOVSSmr
                               : X86::MOVSSmr);
    if (X86::RFP32RegClass.hasSubClassEq(RC))
      return Load ? X86::LD_Fp32m : X86::ST_Fp32m;
    if (X86::VK32RegClass.hasSubClassEq(RC)) {
      assert(STI.hasBWI() && "KMOVD requires BWI");
      return Load ? (HasEGPR ? X86::KMOVDkm_EVEX : X86::KMOVDkm)
                  : (HasEGPR ? X86::KMOVDmk_EVEX : X86::KMOVDmk);
    }
    // All of these mask pair classes have the same spill size; the same kind
    // of kmov instructions can be used with all of them.
    if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK16PAIRRegClass.hasSubClassEq(RC))
      return Load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
    if (X86::FR16RegClass.hasSubClassEq(RC) ||
        X86::FR16XRegClass.hasSubClassEq(RC))
      return getLoadStoreOpcodeForFP16(Load, STI);
    llvm_unreachable("Unknown 4-byte regclass");
  case 8:
    if (X86::GR64RegClass.hasSubClassEq(RC))
      return Load ? X86::MOV64rm : X86::MOV64mr;
    if (X86::FR64XRegClass.hasSubClassEq(RC))
      return Load ? (HasAVX512 ? X86::VMOVSDZrm_alt
                     : HasAVX  ? X86::VMOVSDrm_alt
                               : X86::MOVSDrm_alt)
                  : (HasAVX512 ? X86::VMOVSDZmr
                     : HasAVX  ? X86::VMOVSDmr
                               : X86::MOVSDmr);
    if (X86::VR64RegClass.hasSubClassEq(RC))
      return Load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
    if (X86::RFP64RegClass.hasSubClassEq(RC))
      return Load ? X86::LD_Fp64m : X86::ST_Fp64m;
    if (X86::VK64RegClass.hasSubClassEq(RC)) {
      assert(STI.hasBWI() && "KMOVQ requires BWI");
      return Load ? (HasEGPR ? X86::KMOVQkm_EVEX : X86::KMOVQkm)
                  : (HasEGPR ? X86::KMOVQmk_EVEX : X86::KMOVQmk);
    }
    llvm_unreachable("Unknown 8-byte regclass");
  case 10:
    assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
    return Load ? X86::LD_Fp80m : X86::ST_FpP80m;
  case 16: {
    if (X86::VR128XRegClass.hasSubClassEq(RC)) {
      // If stack is realigned we can use aligned stores.
      if (IsStackAligned)
        return Load ? (HasVLX      ? X86::VMOVAPSZ128rm
                       : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVAPSrm
                                   : X86::MOVAPSrm)
                    : (HasVLX      ? X86::VMOVAPSZ128mr
                       : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVAPSmr
                                   : X86::MOVAPSmr);
      else
        return Load ? (HasVLX      ? X86::VMOVUPSZ128rm
                       : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVUPSrm
                                   : X86::MOVUPSrm)
                    : (HasVLX      ? X86::VMOVUPSZ128mr
                       : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVUPSmr
                                   : X86::MOVUPSmr);
    }
    llvm_unreachable("Unknown 16-byte regclass");
  }
  case 32:
    assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
    // If stack is realigned we can use aligned stores.
    if (IsStackAligned)
      return Load ? (HasVLX      ? X86::VMOVAPSZ256rm
                     : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                 : X86::VMOVAPSYrm)
                  : (HasVLX      ? X86::VMOVAPSZ256mr
                     : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                 : X86::VMOVAPSYmr);
    else
      return Load ? (HasVLX      ? X86::VMOVUPSZ256rm
                     : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                 : X86::VMOVUPSYrm)
                  : (HasVLX      ? X86::VMOVUPSZ256mr
                     : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                 : X86::VMOVUPSYmr);
  case 64:
    assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
    if (IsStackAligned)
      return Load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  case 1024:
    assert(X86::TILERegClass.hasSubClassEq(RC) && "Unknown 1024-byte regclass");
    assert(STI.hasAMXTILE() && "Using 8*1024-bit register requires AMX-TILE");
#define GET_EGPR_IF_ENABLED(OPC) (STI.hasEGPR() ? OPC##_EVEX : OPC)
    return Load ? GET_EGPR_IF_ENABLED(X86::TILELOADD)
                : GET_EGPR_IF_ENABLED(X86::TILESTORED);
#undef GET_EGPR_IF_ENABLED
  }
}
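// The code below recovers an extended addressing mode (base, scale, index,
// displacement) from a memory-operand instruction; it bails out when the base
// is not a plain register or the displacement is not an immediate.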
4549std::optional<ExtAddrMode>
4554 if (MemRefBegin < 0)
4555 return std::nullopt;
4560 if (!BaseOp.isReg())
4561 return std::nullopt;
  if (!DispMO.isImm())
    return std::nullopt;
    ErrInfo = "Scale factor in address must be 1, 2, 4 or 8";
    ErrInfo = "Displacement in address must fit into 32-bit signed integer";
4607 int64_t &ImmVal)
const {
4613 if (
MI.isSubregToReg()) {
4617 unsigned SubIdx =
MI.getOperand(2).getImm();
4618 MovReg =
MI.getOperand(1).getReg();
4619 if (SubIdx != X86::sub_32bit)
4622 MovMI =
MRI.getUniqueVRegDef(MovReg);
4627 if (MovMI->
getOpcode() == X86::MOV32r0 &&
4633 if (MovMI->
getOpcode() != X86::MOV32ri &&
4647 if (!
MI->modifiesRegister(NullValueReg,
TRI))
4649 switch (
MI->getOpcode()) {
    assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
           "expected for shift opcode!");
    return MI->getOperand(0).getReg() == NullValueReg &&
           MI->getOperand(1).getReg() == NullValueReg;
4664 return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
4678 if (MemRefBegin < 0)
  if (!BaseOp->isReg())
    return false;
  if (!DispMO.isImm())
    return false;
  if (!BaseOp->isReg())
    return false;
  OffsetIsScalable = false;
4710 Width = !
MemOp.memoperands_empty() ?
MemOp.memoperands().front()->getSize()
4718 bool IsStackAligned,
4733 case X86::TILELOADD:
4734 case X86::TILESTORED:
4735 case X86::TILELOADD_EVEX:
4736 case X86::TILESTORED_EVEX:
4744 bool isKill)
const {
4748 case X86::TILESTORED:
4749 case X86::TILESTORED_EVEX: {
4752 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4762 case X86::TILELOADD:
4763 case X86::TILELOADD_EVEX: {
4766 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4786 "Stack slot too small for store");
4788 unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
4790 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4811 "Load size exceeds stack slot");
4812 unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
4814 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4826 Register &SrcReg2, int64_t &CmpMask,
4827 int64_t &CmpValue)
const {
4828 switch (
MI.getOpcode()) {
4831 case X86::CMP64ri32:
4835 SrcReg =
MI.getOperand(0).getReg();
4837 if (
MI.getOperand(1).isImm()) {
4839 CmpValue =
MI.getOperand(1).getImm();
4841 CmpMask = CmpValue = 0;
4849 SrcReg =
MI.getOperand(1).getReg();
4858 SrcReg =
MI.getOperand(1).getReg();
4859 SrcReg2 =
MI.getOperand(2).getReg();
4867 SrcReg =
MI.getOperand(1).getReg();
4869 if (
MI.getOperand(2).isImm()) {
4871 CmpValue =
MI.getOperand(2).getImm();
4873 CmpMask = CmpValue = 0;
4880 SrcReg =
MI.getOperand(0).getReg();
4881 SrcReg2 =
MI.getOperand(1).getReg();
4889 SrcReg =
MI.getOperand(0).getReg();
4890 if (
MI.getOperand(1).getReg() != SrcReg)
4897 case X86::TEST64ri32:
4901 SrcReg =
MI.getOperand(0).getReg();
bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
                                        Register SrcReg, Register SrcReg2,
                                        int64_t ImmMask, int64_t ImmValue,
                                        const MachineInstr &OI, bool *IsSwapped,
                                        int64_t *ImmDelta) const {
4930 OIMask != ImmMask || OIValue != ImmValue)
4932 if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) {
4936 if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) {
4942 case X86::CMP64ri32:
4946 case X86::TEST64ri32:
4957 case X86::TEST8rr: {
4964 SrcReg == OISrcReg && ImmMask == OIMask) {
      if (OIValue == ImmValue) {
        *ImmDelta = 0;
        return true;
      } else if (static_cast<uint64_t>(ImmValue) ==
                 static_cast<uint64_t>(OIValue) - 1) {
        *ImmDelta = -1;
        return true;
      } else if (static_cast<uint64_t>(ImmValue) ==
                 static_cast<uint64_t>(OIValue) + 1) {
        *ImmDelta = 1;
        return true;
      }
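// The switch below classifies instructions whose EFLAGS definition can stand
// in for an explicit compare against zero, recording whether the sign flag is
// unusable (NoSignFlag) and whether the overflow flag is known to be cleared
// (ClearsOverflowFlag).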
4991 bool &ClearsOverflowFlag) {
4993 ClearsOverflowFlag =
false;
4999 if (
MI.getOpcode() == X86::ADD64rm ||
MI.getOpcode() == X86::ADD32rm) {
5000 unsigned Flags =
MI.getOperand(5).getTargetFlags();
5006 switch (
MI.getOpcode()) {
5102 case X86::LZCNT16rr:
5103 case X86::LZCNT16rm:
5104 case X86::LZCNT32rr:
5105 case X86::LZCNT32rm:
5106 case X86::LZCNT64rr:
5107 case X86::LZCNT64rm:
5108 case X86::POPCNT16rr:
5109 case X86::POPCNT16rm:
5110 case X86::POPCNT32rr:
5111 case X86::POPCNT32rm:
5112 case X86::POPCNT64rr:
5113 case X86::POPCNT64rm:
5114 case X86::TZCNT16rr:
5115 case X86::TZCNT16rm:
5116 case X86::TZCNT32rr:
5117 case X86::TZCNT32rm:
5118 case X86::TZCNT64rr:
5119 case X86::TZCNT64rm:
5165 case X86::BLSMSK32rr:
5166 case X86::BLSMSK32rm:
5167 case X86::BLSMSK64rr:
5168 case X86::BLSMSK64rm:
5173 case X86::BLCFILL32rr:
5174 case X86::BLCFILL32rm:
5175 case X86::BLCFILL64rr:
5176 case X86::BLCFILL64rm:
5181 case X86::BLCIC32rr:
5182 case X86::BLCIC32rm:
5183 case X86::BLCIC64rr:
5184 case X86::BLCIC64rm:
5185 case X86::BLCMSK32rr:
5186 case X86::BLCMSK32rm:
5187 case X86::BLCMSK64rr:
5188 case X86::BLCMSK64rm:
5193 case X86::BLSFILL32rr:
5194 case X86::BLSFILL32rm:
5195 case X86::BLSFILL64rr:
5196 case X86::BLSFILL64rm:
5197 case X86::BLSIC32rr:
5198 case X86::BLSIC32rm:
5199 case X86::BLSIC64rr:
5200 case X86::BLSIC64rm:
5205 case X86::T1MSKC32rr:
5206 case X86::T1MSKC32rm:
5207 case X86::T1MSKC64rr:
5208 case X86::T1MSKC64rm:
5209 case X86::TZMSK32rr:
5210 case X86::TZMSK32rm:
5211 case X86::TZMSK64rr:
5212 case X86::TZMSK64rm:
5216 ClearsOverflowFlag =
true;
5218 case X86::BEXTR32rr:
5219 case X86::BEXTR64rr:
5220 case X86::BEXTR32rm:
5221 case X86::BEXTR64rm:
5222 case X86::BEXTRI32ri:
5223 case X86::BEXTRI32mi:
5224 case X86::BEXTRI64ri:
5225 case X86::BEXTRI64mi:
5236 switch (
MI.getOpcode()) {
5244 case X86::LZCNT16rr:
5245 case X86::LZCNT32rr:
5246 case X86::LZCNT64rr:
5248 case X86::POPCNT16rr:
5249 case X86::POPCNT32rr:
5250 case X86::POPCNT64rr:
5252 case X86::TZCNT16rr:
5253 case X86::TZCNT32rr:
5254 case X86::TZCNT64rr:
5268 case X86::BLSMSK32rr:
5269 case X86::BLSMSK64rr:
5301 unsigned NewOpcode = 0;
5302#define FROM_TO(A, B) \
5303 CASE_ND(A) NewOpcode = X86::B; \
5327 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
5328 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
  bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
  assert(SrcRegDef && "Must have a definition (SSA)");

  bool NoSignFlag = false;
  bool ClearsOverflowFlag = false;
  bool ShouldUpdateCC = false;
  bool IsSwapped = false;
  bool HasNF = Subtarget.hasNF();
  int64_t ImmDelta = 0;
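// Walk backwards from the compare, looking either for the instruction that
// defines the compared register (so the compare can reuse its EFLAGS) or for
// an earlier, redundant flag-setting instruction; the walk may continue into
// a single predecessor block.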
5368 if (&Inst == SrcRegDef) {
5391 Subtarget, NoSignFlag, ClearsOverflowFlag)) {
5400 if (Inst.modifiesRegister(X86::EFLAGS,
TRI)) {
5411 Inst.getOperand(OpNo).getReg() == SrcReg) {
5412 ShouldUpdateCC =
true;
5423 if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
5424 Inst, &IsSwapped, &ImmDelta)) {
5432 if (!Movr0Inst && Inst.
getOpcode() == X86::MOV32r0 &&
5433 Inst.registerDefIsDead(X86::EFLAGS,
TRI)) {
5447 if (HasNF && Inst.registerDefIsDead(X86::EFLAGS,
TRI) && !IsWithReloc) {
5452 InstsToUpdate.
push_back(std::make_pair(&Inst, NewOp));
    if (MBB->pred_size() != 1)
      return false;
    MBB = *MBB->pred_begin();
    From = MBB->rbegin();

  bool FlagsMayLiveOut = true;
    bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
    bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
    if (!UseEFLAGS && ModifyEFLAGS) {
      FlagsMayLiveOut = false;
      break;
    }
    if (!UseEFLAGS && !ModifyEFLAGS)
      continue;
5520 if (!ClearsOverflowFlag)
5539 ReplacementCC = NewCC;
5545 }
else if (IsSwapped) {
5552 ShouldUpdateCC =
true;
5553 }
else if (ImmDelta != 0) {
5554 unsigned BitWidth = RI.getRegSizeInBits(*
MRI->getRegClass(SrcReg));
5564 if (ImmDelta != 1 || CmpValue == 0)
5574 if (ImmDelta != 1 || CmpValue == 0)
5601 ShouldUpdateCC =
true;
5604 if (ShouldUpdateCC && ReplacementCC != OldCC) {
5608 OpsToUpdate.
push_back(std::make_pair(&Instr, ReplacementCC));
5610 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS,
TRI)) {
5612 FlagsMayLiveOut =
false;
5619 if ((
MI !=
nullptr || ShouldUpdateCC) && FlagsMayLiveOut) {
5626 assert((
MI ==
nullptr ||
Sub ==
nullptr) &&
"Should not have Sub and MI set");
5633 if (&CmpMBB != SubBB)
5637 InsertE =
Sub->getParent()->rend();
5638 for (; InsertI != InsertE; ++InsertI) {
5640 if (!Instr->readsRegister(X86::EFLAGS,
TRI) &&
5641 Instr->modifiesRegister(X86::EFLAGS,
TRI)) {
5648 if (InsertI == InsertE)
5653 for (
auto &Inst : InstsToUpdate) {
5654 Inst.first->setDesc(
get(Inst.second));
5655 Inst.first->removeOperand(
5656 Inst.first->findRegisterDefOperandIdx(X86::EFLAGS,
nullptr));
5661 Sub->findRegisterDefOperand(X86::EFLAGS,
nullptr);
5662 assert(FlagDef &&
"Unable to locate a def EFLAGS operand");
5668 for (
auto &
Op : OpsToUpdate) {
5669 Op.first->getOperand(
Op.first->getDesc().getNumOperands() - 1)
5674 MBB = *
MBB->pred_begin()) {
5675 assert(
MBB->pred_size() == 1 &&
"Expected exactly one predecessor");
5676 if (!
MBB->isLiveIn(X86::EFLAGS))
5677 MBB->addLiveIn(X86::EFLAGS);
5705#define FROM_TO(FROM, TO) \
5708 case X86::FROM##_ND: \
5709 return X86::TO##_ND;
5739#define FROM_TO(FROM, TO) \
5743 FROM_TO(CTEST64rr, CTEST64ri32)
5762 bool MakeChange)
const {
5768 const TargetRegisterClass *RC =
nullptr;
5770 RC =
MRI->getRegClass(
Reg);
5772 (
Reg.
isVirtual() && X86::GR64RegClass.hasSubClassEq(RC))) {
5777 if (
UseMI.findRegisterUseOperand(
Reg,
nullptr)->getSubReg())
5782 !
MRI->hasOneNonDBGUse(
Reg))
5787 if (
Opc == TargetOpcode::COPY) {
5789 const TargetRegisterClass *RC =
nullptr;
5791 RC =
MRI->getRegClass(ToReg);
5792 bool GR32Reg = (ToReg.
isVirtual() && X86::GR32RegClass.hasSubClassEq(RC)) ||
5794 bool GR64Reg = (ToReg.
isVirtual() && X86::GR64RegClass.hasSubClassEq(RC)) ||
5796 bool GR8Reg = (ToReg.
isVirtual() && X86::GR8RegClass.hasSubClassEq(RC)) ||
5807 NewOpc = X86::MOV32ri64;
5809 NewOpc = X86::MOV64ri;
5810 }
else if (GR32Reg) {
5811 NewOpc = X86::MOV32ri;
5815 if (
UseMI.getParent()->computeRegisterLiveness(
5824 UseMI.removeOperand(
5825 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr));
5833 NewOpc = X86::MOV8ri;
  if ((NewOpc == X86::SUB64ri32 || NewOpc == X86::SUB32ri ||
       NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri ||
       NewOpc == X86::SUB64ri32_ND || NewOpc == X86::SUB32ri_ND ||
       NewOpc == X86::SBB64ri32_ND || NewOpc == X86::SBB32ri_ND) &&
      UseMI.findRegisterUseOperandIdx(Reg, nullptr) != 2)
    return false;
  if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
       (NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
      UseMI.findRegisterUseOperandIdx(Reg, nullptr) != 1)
    return false;
  using namespace X86;
  if (isSHL(Opc) || isSHR(Opc) || isSAR(Opc) || isROL(Opc) || isROR(Opc) ||
      isRCL(Opc) || isRCR(Opc)) {
    unsigned RegIdx = UseMI.findRegisterUseOperandIdx(Reg, nullptr);
    UseMI.removeOperand(RegIdx);
      UseMI.registerDefIsDead(X86::EFLAGS, nullptr)) {
    UseMI.setDesc(get(TargetOpcode::COPY));
    UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg, nullptr));
    UseMI.removeOperand(UseMI.findRegisterDefOperandIdx(X86::EFLAGS, nullptr));
    UseMI.untieRegOperand(0);
5895 unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
5896 unsigned ImmOpNum = 2;
5897 if (!
UseMI.getOperand(0).isDef()) {
5901 if (
Opc == TargetOpcode::COPY)
5905 commuteInstruction(
UseMI);
5909 UseMI.getOperand(ImmOpNum).ChangeToImmediate(ImmVal);
5927 return foldImmediateImpl(
UseMI, &
DefMI, Reg, ImmVal,
MRI,
true);
  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
  MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
  assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
5992 int StackAdjustment;
5994 if (Subtarget.is64Bit()) {
5996 MIB->
getOpcode() == X86::MOV32ImmSExti8);
6010 StackAdjustment = 8;
6016 StackAdjustment = 4;
6028 bool EmitCFI = !TFL->
hasFP(MF) && NeedsDwarfCFI;
6075 MIB->
getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
6087 const MCInstrDesc &BroadcastDesc,
unsigned SubIdx) {
6090 if (
TRI->getEncodingValue(DestReg) < 16) {
6097 DestReg =
TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
6109 const MCInstrDesc &ExtractDesc,
unsigned SubIdx) {
6112 if (
TRI->getEncodingValue(SrcReg) < 16) {
6119 SrcReg =
TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
6142 if (
MI.getOpcode() == X86::MOVSHPrm) {
6143 NewOpc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
6145 if (
Reg > X86::XMM15)
6146 NewOpc = X86::VMOVSSZrm;
6148 NewOpc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
6150 if (
Reg > X86::XMM15)
6151 NewOpc = X86::VMOVSSZmr;
  bool HasAVX = Subtarget.hasAVX();
  switch (MI.getOpcode()) {
  case X86::MOV32ImmSExti8:
  case X86::MOV64ImmSExti8:
  case X86::SETB_C32r:
  case X86::SETB_C64r:
  case X86::FsFLD0F128:
  case X86::AVX_SET0: {
    assert(HasAVX && "AVX not supported");
    Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
6193 case X86::AVX512_128_SET0:
6194 case X86::AVX512_FsFLD0SH:
6195 case X86::AVX512_FsFLD0SS:
6196 case X86::AVX512_FsFLD0SD:
6197 case X86::AVX512_FsFLD0F128: {
6198 bool HasVLX = Subtarget.hasVLX();
6201 if (HasVLX ||
TRI->getEncodingValue(SrcReg) < 16)
6203 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6206 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
6210 case X86::AVX512_256_SET0:
6211 case X86::AVX512_512_SET0: {
6212 bool HasVLX = Subtarget.hasVLX();
6215 if (HasVLX ||
TRI->getEncodingValue(SrcReg) < 16) {
6216 Register XReg =
TRI->getSubReg(SrcReg, X86::sub_xmm);
6222 if (
MI.getOpcode() == X86::AVX512_256_SET0) {
6225 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
6233 case X86::V_SETALLONES:
6235 get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
6236 case X86::AVX2_SETALLONES:
6238 case X86::AVX1_SETALLONES: {
6245 case X86::AVX512_128_SETALLONES:
6246 case X86::AVX512_256_SETALLONES:
6247 case X86::AVX512_512_SETALLONES: {
    switch (MI.getOpcode()) {
    case X86::AVX512_128_SETALLONES: {
      if (X86::VR128RegClass.contains(Reg))
      Opc = X86::VPTERNLOGDZ128rri;
    case X86::AVX512_256_SETALLONES: {
      if (X86::VR256RegClass.contains(Reg))
      Opc = X86::VPTERNLOGDZ256rri;
    case X86::AVX512_512_SETALLONES:
      Opc = X86::VPTERNLOGDZrri;
  case X86::AVX512_512_SEXT_MASK_32:
  case X86::AVX512_512_SEXT_MASK_64: {
    unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64)
                       ? X86::VPTERNLOGQZrrikz
                       : X86::VPTERNLOGDZrrikz;
    MI.removeOperand(1);
        .addReg(MaskReg, MaskState)
6297 case X86::VMOVAPSZ128rm_NOVLX:
6299 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6300 case X86::VMOVUPSZ128rm_NOVLX:
6302 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6303 case X86::VMOVAPSZ256rm_NOVLX:
6305 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6306 case X86::VMOVUPSZ256rm_NOVLX:
6308 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6309 case X86::VMOVAPSZ128mr_NOVLX:
6311 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6312 case X86::VMOVUPSZ128mr_NOVLX:
6314 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6315 case X86::VMOVAPSZ256mr_NOVLX:
6317 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6318 case X86::VMOVUPSZ256mr_NOVLX:
6320 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
  case X86::MOV32ri64: {
    Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
    MI.setDesc(get(X86::MOV32ri));
  case X86::RDFLAGS32:
  case X86::RDFLAGS64: {
    unsigned Is64Bit = MI.getOpcode() == X86::RDFLAGS64;
            get(Is64Bit ? X86::PUSHF64 : X86::PUSHF32))
           "Unexpected register in operand! Should be EFLAGS.");
           "Unexpected register in operand! Should be DF.");
    MIB->setDesc(get(Is64Bit ? X86::POP64r : X86::POP32r));
  case X86::WRFLAGS32:
  case X86::WRFLAGS64: {
    unsigned Is64Bit = MI.getOpcode() == X86::WRFLAGS64;
            get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MI.getOperand(0).getReg());
            get(Is64Bit ? X86::POPF64 : X86::POPF32));
    MI.eraseFromParent();
6391 case TargetOpcode::LOAD_STACK_GUARD:
6397 case X86::SHLDROT32ri:
6399 case X86::SHLDROT64ri:
6401 case X86::SHRDROT32ri:
6403 case X86::SHRDROT64ri:
6405 case X86::ADD8rr_DB:
6408 case X86::ADD16rr_DB:
6411 case X86::ADD32rr_DB:
6414 case X86::ADD64rr_DB:
6417 case X86::ADD8ri_DB:
6420 case X86::ADD16ri_DB:
6423 case X86::ADD32ri_DB:
6426 case X86::ADD64ri32_DB:
                                bool ForLoadFold = false) {
6452 case X86::CVTSI2SSrr:
6453 case X86::CVTSI2SSrm:
6454 case X86::CVTSI642SSrr:
6455 case X86::CVTSI642SSrm:
6456 case X86::CVTSI2SDrr:
6457 case X86::CVTSI2SDrm:
6458 case X86::CVTSI642SDrr:
6459 case X86::CVTSI642SDrm:
6462 return !ForLoadFold;
6463 case X86::CVTSD2SSrr:
6464 case X86::CVTSD2SSrm:
6465 case X86::CVTSS2SDrr:
6466 case X86::CVTSS2SDrm:
6473 case X86::RCPSSr_Int:
6474 case X86::RCPSSm_Int:
6475 case X86::ROUNDSDri:
6476 case X86::ROUNDSDmi:
6477 case X86::ROUNDSSri:
6478 case X86::ROUNDSSmi:
6481 case X86::RSQRTSSr_Int:
6482 case X86::RSQRTSSm_Int:
6485 case X86::SQRTSSr_Int:
6486 case X86::SQRTSSm_Int:
6489 case X86::SQRTSDr_Int:
6490 case X86::SQRTSDm_Int:
6492 case X86::VFCMULCPHZ128rm:
6493 case X86::VFCMULCPHZ128rmb:
6494 case X86::VFCMULCPHZ128rmbkz:
6495 case X86::VFCMULCPHZ128rmkz:
6496 case X86::VFCMULCPHZ128rr:
6497 case X86::VFCMULCPHZ128rrkz:
6498 case X86::VFCMULCPHZ256rm:
6499 case X86::VFCMULCPHZ256rmb:
6500 case X86::VFCMULCPHZ256rmbkz:
6501 case X86::VFCMULCPHZ256rmkz:
6502 case X86::VFCMULCPHZ256rr:
6503 case X86::VFCMULCPHZ256rrkz:
6504 case X86::VFCMULCPHZrm:
6505 case X86::VFCMULCPHZrmb:
6506 case X86::VFCMULCPHZrmbkz:
6507 case X86::VFCMULCPHZrmkz:
6508 case X86::VFCMULCPHZrr:
6509 case X86::VFCMULCPHZrrb:
6510 case X86::VFCMULCPHZrrbkz:
6511 case X86::VFCMULCPHZrrkz:
6512 case X86::VFMULCPHZ128rm:
6513 case X86::VFMULCPHZ128rmb:
6514 case X86::VFMULCPHZ128rmbkz:
6515 case X86::VFMULCPHZ128rmkz:
6516 case X86::VFMULCPHZ128rr:
6517 case X86::VFMULCPHZ128rrkz:
6518 case X86::VFMULCPHZ256rm:
6519 case X86::VFMULCPHZ256rmb:
6520 case X86::VFMULCPHZ256rmbkz:
6521 case X86::VFMULCPHZ256rmkz:
6522 case X86::VFMULCPHZ256rr:
6523 case X86::VFMULCPHZ256rrkz:
6524 case X86::VFMULCPHZrm:
6525 case X86::VFMULCPHZrmb:
6526 case X86::VFMULCPHZrmbkz:
6527 case X86::VFMULCPHZrmkz:
6528 case X86::VFMULCPHZrr:
6529 case X86::VFMULCPHZrrb:
6530 case X86::VFMULCPHZrrbkz:
6531 case X86::VFMULCPHZrrkz:
6532 case X86::VFCMULCSHZrm:
6533 case X86::VFCMULCSHZrmkz:
6534 case X86::VFCMULCSHZrr:
6535 case X86::VFCMULCSHZrrb:
6536 case X86::VFCMULCSHZrrbkz:
6537 case X86::VFCMULCSHZrrkz:
6538 case X86::VFMULCSHZrm:
6539 case X86::VFMULCSHZrmkz:
6540 case X86::VFMULCSHZrr:
6541 case X86::VFMULCSHZrrb:
6542 case X86::VFMULCSHZrrbkz:
6543 case X86::VFMULCSHZrrkz:
6544 return Subtarget.hasMULCFalseDeps();
6545 case X86::VPERMDYrm:
6546 case X86::VPERMDYrr:
6547 case X86::VPERMQYmi:
6548 case X86::VPERMQYri:
6549 case X86::VPERMPSYrm:
6550 case X86::VPERMPSYrr:
6551 case X86::VPERMPDYmi:
6552 case X86::VPERMPDYri:
6553 case X86::VPERMDZ256rm:
6554 case X86::VPERMDZ256rmb:
6555 case X86::VPERMDZ256rmbkz:
6556 case X86::VPERMDZ256rmkz:
6557 case X86::VPERMDZ256rr:
6558 case X86::VPERMDZ256rrkz:
6559 case X86::VPERMDZrm:
6560 case X86::VPERMDZrmb:
6561 case X86::VPERMDZrmbkz:
6562 case X86::VPERMDZrmkz:
6563 case X86::VPERMDZrr:
6564 case X86::VPERMDZrrkz:
6565 case X86::VPERMQZ256mbi:
6566 case X86::VPERMQZ256mbikz:
6567 case X86::VPERMQZ256mi:
6568 case X86::VPERMQZ256mikz:
6569 case X86::VPERMQZ256ri:
6570 case X86::VPERMQZ256rikz:
6571 case X86::VPERMQZ256rm:
6572 case X86::VPERMQZ256rmb:
6573 case X86::VPERMQZ256rmbkz:
6574 case X86::VPERMQZ256rmkz:
6575 case X86::VPERMQZ256rr:
6576 case X86::VPERMQZ256rrkz:
6577 case X86::VPERMQZmbi:
6578 case X86::VPERMQZmbikz:
6579 case X86::VPERMQZmi:
6580 case X86::VPERMQZmikz:
6581 case X86::VPERMQZri:
6582 case X86::VPERMQZrikz:
6583 case X86::VPERMQZrm:
6584 case X86::VPERMQZrmb:
6585 case X86::VPERMQZrmbkz:
6586 case X86::VPERMQZrmkz:
6587 case X86::VPERMQZrr:
6588 case X86::VPERMQZrrkz:
6589 case X86::VPERMPSZ256rm:
6590 case X86::VPERMPSZ256rmb:
6591 case X86::VPERMPSZ256rmbkz:
6592 case X86::VPERMPSZ256rmkz:
6593 case X86::VPERMPSZ256rr:
6594 case X86::VPERMPSZ256rrkz:
6595 case X86::VPERMPSZrm:
6596 case X86::VPERMPSZrmb:
6597 case X86::VPERMPSZrmbkz:
6598 case X86::VPERMPSZrmkz:
6599 case X86::VPERMPSZrr:
6600 case X86::VPERMPSZrrkz:
6601 case X86::VPERMPDZ256mbi:
6602 case X86::VPERMPDZ256mbikz:
6603 case X86::VPERMPDZ256mi:
6604 case X86::VPERMPDZ256mikz:
6605 case X86::VPERMPDZ256ri:
6606 case X86::VPERMPDZ256rikz:
6607 case X86::VPERMPDZ256rm:
6608 case X86::VPERMPDZ256rmb:
6609 case X86::VPERMPDZ256rmbkz:
6610 case X86::VPERMPDZ256rmkz:
6611 case X86::VPERMPDZ256rr:
6612 case X86::VPERMPDZ256rrkz:
6613 case X86::VPERMPDZmbi:
6614 case X86::VPERMPDZmbikz:
6615 case X86::VPERMPDZmi:
6616 case X86::VPERMPDZmikz:
6617 case X86::VPERMPDZri:
6618 case X86::VPERMPDZrikz:
6619 case X86::VPERMPDZrm:
6620 case X86::VPERMPDZrmb:
6621 case X86::VPERMPDZrmbkz:
6622 case X86::VPERMPDZrmkz:
6623 case X86::VPERMPDZrr:
6624 case X86::VPERMPDZrrkz:
6625 return Subtarget.hasPERMFalseDeps();
6626 case X86::VRANGEPDZ128rmbi:
6627 case X86::VRANGEPDZ128rmbikz:
6628 case X86::VRANGEPDZ128rmi:
6629 case X86::VRANGEPDZ128rmikz:
6630 case X86::VRANGEPDZ128rri:
6631 case X86::VRANGEPDZ128rrikz:
6632 case X86::VRANGEPDZ256rmbi:
6633 case X86::VRANGEPDZ256rmbikz:
6634 case X86::VRANGEPDZ256rmi:
6635 case X86::VRANGEPDZ256rmikz:
6636 case X86::VRANGEPDZ256rri:
6637 case X86::VRANGEPDZ256rrikz:
6638 case X86::VRANGEPDZrmbi:
6639 case X86::VRANGEPDZrmbikz:
6640 case X86::VRANGEPDZrmi:
6641 case X86::VRANGEPDZrmikz:
6642 case X86::VRANGEPDZrri:
6643 case X86::VRANGEPDZrrib:
6644 case X86::VRANGEPDZrribkz:
6645 case X86::VRANGEPDZrrikz:
6646 case X86::VRANGEPSZ128rmbi:
6647 case X86::VRANGEPSZ128rmbikz:
6648 case X86::VRANGEPSZ128rmi:
6649 case X86::VRANGEPSZ128rmikz:
6650 case X86::VRANGEPSZ128rri:
6651 case X86::VRANGEPSZ128rrikz:
6652 case X86::VRANGEPSZ256rmbi:
6653 case X86::VRANGEPSZ256rmbikz:
6654 case X86::VRANGEPSZ256rmi:
6655 case X86::VRANGEPSZ256rmikz:
6656 case X86::VRANGEPSZ256rri:
6657 case X86::VRANGEPSZ256rrikz:
6658 case X86::VRANGEPSZrmbi:
6659 case X86::VRANGEPSZrmbikz:
6660 case X86::VRANGEPSZrmi:
6661 case X86::VRANGEPSZrmikz:
6662 case X86::VRANGEPSZrri:
6663 case X86::VRANGEPSZrrib:
6664 case X86::VRANGEPSZrribkz:
6665 case X86::VRANGEPSZrrikz:
6666 case X86::VRANGESDZrmi:
6667 case X86::VRANGESDZrmikz:
6668 case X86::VRANGESDZrri:
6669 case X86::VRANGESDZrrib:
6670 case X86::VRANGESDZrribkz:
6671 case X86::VRANGESDZrrikz:
6672 case X86::VRANGESSZrmi:
6673 case X86::VRANGESSZrmikz:
6674 case X86::VRANGESSZrri:
6675 case X86::VRANGESSZrrib:
6676 case X86::VRANGESSZrribkz:
6677 case X86::VRANGESSZrrikz:
6678 return Subtarget.hasRANGEFalseDeps();
6679 case X86::VGETMANTSSZrmi:
6680 case X86::VGETMANTSSZrmikz:
6681 case X86::VGETMANTSSZrri:
6682 case X86::VGETMANTSSZrrib:
6683 case X86::VGETMANTSSZrribkz:
6684 case X86::VGETMANTSSZrrikz:
6685 case X86::VGETMANTSDZrmi:
6686 case X86::VGETMANTSDZrmikz:
6687 case X86::VGETMANTSDZrri:
6688 case X86::VGETMANTSDZrrib:
6689 case X86::VGETMANTSDZrribkz:
6690 case X86::VGETMANTSDZrrikz:
6691 case X86::VGETMANTSHZrmi:
6692 case X86::VGETMANTSHZrmikz:
6693 case X86::VGETMANTSHZrri:
6694 case X86::VGETMANTSHZrrib:
6695 case X86::VGETMANTSHZrribkz:
6696 case X86::VGETMANTSHZrrikz:
6697 case X86::VGETMANTPSZ128rmbi:
6698 case X86::VGETMANTPSZ128rmbikz:
6699 case X86::VGETMANTPSZ128rmi:
6700 case X86::VGETMANTPSZ128rmikz:
6701 case X86::VGETMANTPSZ256rmbi:
6702 case X86::VGETMANTPSZ256rmbikz:
6703 case X86::VGETMANTPSZ256rmi:
6704 case X86::VGETMANTPSZ256rmikz:
6705 case X86::VGETMANTPSZrmbi:
6706 case X86::VGETMANTPSZrmbikz:
6707 case X86::VGETMANTPSZrmi:
6708 case X86::VGETMANTPSZrmikz:
6709 case X86::VGETMANTPDZ128rmbi:
6710 case X86::VGETMANTPDZ128rmbikz:
6711 case X86::VGETMANTPDZ128rmi:
6712 case X86::VGETMANTPDZ128rmikz:
6713 case X86::VGETMANTPDZ256rmbi:
6714 case X86::VGETMANTPDZ256rmbikz:
6715 case X86::VGETMANTPDZ256rmi:
6716 case X86::VGETMANTPDZ256rmikz:
6717 case X86::VGETMANTPDZrmbi:
6718 case X86::VGETMANTPDZrmbikz:
6719 case X86::VGETMANTPDZrmi:
6720 case X86::VGETMANTPDZrmikz:
6721 return Subtarget.hasGETMANTFalseDeps();
6722 case X86::VPMULLQZ128rm:
6723 case X86::VPMULLQZ128rmb:
6724 case X86::VPMULLQZ128rmbkz:
6725 case X86::VPMULLQZ128rmkz:
6726 case X86::VPMULLQZ128rr:
6727 case X86::VPMULLQZ128rrkz:
6728 case X86::VPMULLQZ256rm:
6729 case X86::VPMULLQZ256rmb:
6730 case X86::VPMULLQZ256rmbkz:
6731 case X86::VPMULLQZ256rmkz:
6732 case X86::VPMULLQZ256rr:
6733 case X86::VPMULLQZ256rrkz:
6734 case X86::VPMULLQZrm:
6735 case X86::VPMULLQZrmb:
6736 case X86::VPMULLQZrmbkz:
6737 case X86::VPMULLQZrmkz:
6738 case X86::VPMULLQZrr:
6739 case X86::VPMULLQZrrkz:
6740 return Subtarget.hasMULLQFalseDeps();
6742 case X86::POPCNT32rm:
6743 case X86::POPCNT32rr:
6744 case X86::POPCNT64rm:
6745 case X86::POPCNT64rr:
6746 return Subtarget.hasPOPCNTFalseDeps();
6747 case X86::LZCNT32rm:
6748 case X86::LZCNT32rr:
6749 case X86::LZCNT64rm:
6750 case X86::LZCNT64rr:
6751 case X86::TZCNT32rm:
6752 case X86::TZCNT32rr:
6753 case X86::TZCNT64rm:
6754 case X86::TZCNT64rr:
6755 return Subtarget.hasLZCNTFalseDeps();
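// The opcodes listed above have a false output dependency on their
// destination register on some CPUs: the hardware treats the destination as
// an input even though its old value is irrelevant. For example, the
// allocator may emit
//   popcntq %rcx, %rax
// which then stalls on the previous writer of %rax; inserting
//   xorl %eax, %eax
// immediately before it breaks the dependency at negligible cost. Reporting
// the dependency here lets later passes insert such a dependency-breaking
// instruction (see the XOR insertion further below).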
  bool HasNDDPartialWrite = false;
  if (!Reg.isVirtual())
    HasNDDPartialWrite =
        X86::GR8RegClass.contains(Reg) || X86::GR16RegClass.contains(Reg);

    bool ReadsReg = false;
    if (Reg.isVirtual())
      ReadsReg = (MO.readsReg() || MI.readsVirtualRegister(Reg));
    else
      ReadsReg = MI.readsRegister(Reg, TRI);
    if (ReadsReg != HasNDDPartialWrite)

                              bool ForLoadFold = false) {
6812 case X86::MMX_PUNPCKHBWrr:
6813 case X86::MMX_PUNPCKHWDrr:
6814 case X86::MMX_PUNPCKHDQrr:
6815 case X86::MMX_PUNPCKLBWrr:
6816 case X86::MMX_PUNPCKLWDrr:
6817 case X86::MMX_PUNPCKLDQrr:
6818 case X86::MOVHLPSrr:
6819 case X86::PACKSSWBrr:
6820 case X86::PACKUSWBrr:
6821 case X86::PACKSSDWrr:
6822 case X86::PACKUSDWrr:
6823 case X86::PUNPCKHBWrr:
6824 case X86::PUNPCKLBWrr:
6825 case X86::PUNPCKHWDrr:
6826 case X86::PUNPCKLWDrr:
6827 case X86::PUNPCKHDQrr:
6828 case X86::PUNPCKLDQrr:
6829 case X86::PUNPCKHQDQrr:
6830 case X86::PUNPCKLQDQrr:
6831 case X86::SHUFPDrri:
6832 case X86::SHUFPSrri:
6838 return OpNum == 2 && !ForLoadFold;
6840 case X86::VMOVLHPSrr:
6841 case X86::VMOVLHPSZrr:
6842 case X86::VPACKSSWBrr:
6843 case X86::VPACKUSWBrr:
6844 case X86::VPACKSSDWrr:
6845 case X86::VPACKUSDWrr:
6846 case X86::VPACKSSWBZ128rr:
6847 case X86::VPACKUSWBZ128rr:
6848 case X86::VPACKSSDWZ128rr:
6849 case X86::VPACKUSDWZ128rr:
6850 case X86::VPERM2F128rri:
6851 case X86::VPERM2I128rri:
6852 case X86::VSHUFF32X4Z256rri:
6853 case X86::VSHUFF32X4Zrri:
6854 case X86::VSHUFF64X2Z256rri:
6855 case X86::VSHUFF64X2Zrri:
6856 case X86::VSHUFI32X4Z256rri:
6857 case X86::VSHUFI32X4Zrri:
6858 case X86::VSHUFI64X2Z256rri:
6859 case X86::VSHUFI64X2Zrri:
6860 case X86::VPUNPCKHBWrr:
6861 case X86::VPUNPCKLBWrr:
6862 case X86::VPUNPCKHBWYrr:
6863 case X86::VPUNPCKLBWYrr:
6864 case X86::VPUNPCKHBWZ128rr:
6865 case X86::VPUNPCKLBWZ128rr:
6866 case X86::VPUNPCKHBWZ256rr:
6867 case X86::VPUNPCKLBWZ256rr:
6868 case X86::VPUNPCKHBWZrr:
6869 case X86::VPUNPCKLBWZrr:
6870 case X86::VPUNPCKHWDrr:
6871 case X86::VPUNPCKLWDrr:
6872 case X86::VPUNPCKHWDYrr:
6873 case X86::VPUNPCKLWDYrr:
6874 case X86::VPUNPCKHWDZ128rr:
6875 case X86::VPUNPCKLWDZ128rr:
6876 case X86::VPUNPCKHWDZ256rr:
6877 case X86::VPUNPCKLWDZ256rr:
6878 case X86::VPUNPCKHWDZrr:
6879 case X86::VPUNPCKLWDZrr:
6880 case X86::VPUNPCKHDQrr:
6881 case X86::VPUNPCKLDQrr:
6882 case X86::VPUNPCKHDQYrr:
6883 case X86::VPUNPCKLDQYrr:
6884 case X86::VPUNPCKHDQZ128rr:
6885 case X86::VPUNPCKLDQZ128rr:
6886 case X86::VPUNPCKHDQZ256rr:
6887 case X86::VPUNPCKLDQZ256rr:
6888 case X86::VPUNPCKHDQZrr:
6889 case X86::VPUNPCKLDQZrr:
6890 case X86::VPUNPCKHQDQrr:
6891 case X86::VPUNPCKLQDQrr:
6892 case X86::VPUNPCKHQDQYrr:
6893 case X86::VPUNPCKLQDQYrr:
6894 case X86::VPUNPCKHQDQZ128rr:
6895 case X86::VPUNPCKLQDQZ128rr:
6896 case X86::VPUNPCKHQDQZ256rr:
6897 case X86::VPUNPCKLQDQZ256rr:
6898 case X86::VPUNPCKHQDQZrr:
6899 case X86::VPUNPCKLQDQZrr:
6903 return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
6905 case X86::VCVTSI2SSrr:
6906 case X86::VCVTSI2SSrm:
6907 case X86::VCVTSI2SSrr_Int:
6908 case X86::VCVTSI2SSrm_Int:
6909 case X86::VCVTSI642SSrr:
6910 case X86::VCVTSI642SSrm:
6911 case X86::VCVTSI642SSrr_Int:
6912 case X86::VCVTSI642SSrm_Int:
6913 case X86::VCVTSI2SDrr:
6914 case X86::VCVTSI2SDrm:
6915 case X86::VCVTSI2SDrr_Int:
6916 case X86::VCVTSI2SDrm_Int:
6917 case X86::VCVTSI642SDrr:
6918 case X86::VCVTSI642SDrm:
6919 case X86::VCVTSI642SDrr_Int:
6920 case X86::VCVTSI642SDrm_Int:
6922 case X86::VCVTSI2SSZrr:
6923 case X86::VCVTSI2SSZrm:
6924 case X86::VCVTSI2SSZrr_Int:
6925 case X86::VCVTSI2SSZrrb_Int:
6926 case X86::VCVTSI2SSZrm_Int:
6927 case X86::VCVTSI642SSZrr:
6928 case X86::VCVTSI642SSZrm:
6929 case X86::VCVTSI642SSZrr_Int:
6930 case X86::VCVTSI642SSZrrb_Int:
6931 case X86::VCVTSI642SSZrm_Int:
6932 case X86::VCVTSI2SDZrr:
6933 case X86::VCVTSI2SDZrm:
6934 case X86::VCVTSI2SDZrr_Int:
6935 case X86::VCVTSI2SDZrm_Int:
6936 case X86::VCVTSI642SDZrr:
6937 case X86::VCVTSI642SDZrm:
6938 case X86::VCVTSI642SDZrr_Int:
6939 case X86::VCVTSI642SDZrrb_Int:
6940 case X86::VCVTSI642SDZrm_Int:
6941 case X86::VCVTUSI2SSZrr:
6942 case X86::VCVTUSI2SSZrm:
6943 case X86::VCVTUSI2SSZrr_Int:
6944 case X86::VCVTUSI2SSZrrb_Int:
6945 case X86::VCVTUSI2SSZrm_Int:
6946 case X86::VCVTUSI642SSZrr:
6947 case X86::VCVTUSI642SSZrm:
6948 case X86::VCVTUSI642SSZrr_Int:
6949 case X86::VCVTUSI642SSZrrb_Int:
6950 case X86::VCVTUSI642SSZrm_Int:
6951 case X86::VCVTUSI2SDZrr:
6952 case X86::VCVTUSI2SDZrm:
6953 case X86::VCVTUSI2SDZrr_Int:
6954 case X86::VCVTUSI2SDZrm_Int:
6955 case X86::VCVTUSI642SDZrr:
6956 case X86::VCVTUSI642SDZrm:
6957 case X86::VCVTUSI642SDZrr_Int:
6958 case X86::VCVTUSI642SDZrrb_Int:
6959 case X86::VCVTUSI642SDZrm_Int:
6960 case X86::VCVTSI2SHZrr:
6961 case X86::VCVTSI2SHZrm:
6962 case X86::VCVTSI2SHZrr_Int:
6963 case X86::VCVTSI2SHZrrb_Int:
6964 case X86::VCVTSI2SHZrm_Int:
6965 case X86::VCVTSI642SHZrr:
6966 case X86::VCVTSI642SHZrm:
6967 case X86::VCVTSI642SHZrr_Int:
6968 case X86::VCVTSI642SHZrrb_Int:
6969 case X86::VCVTSI642SHZrm_Int:
6970 case X86::VCVTUSI2SHZrr:
6971 case X86::VCVTUSI2SHZrm:
6972 case X86::VCVTUSI2SHZrr_Int:
6973 case X86::VCVTUSI2SHZrrb_Int:
6974 case X86::VCVTUSI2SHZrm_Int:
6975 case X86::VCVTUSI642SHZrr:
6976 case X86::VCVTUSI642SHZrm:
6977 case X86::VCVTUSI642SHZrr_Int:
6978 case X86::VCVTUSI642SHZrrb_Int:
6979 case X86::VCVTUSI642SHZrm_Int:
6982 return OpNum == 1 && !ForLoadFold;
6983 case X86::VCVTSD2SSrr:
6984 case X86::VCVTSD2SSrm:
6985 case X86::VCVTSD2SSrr_Int:
6986 case X86::VCVTSD2SSrm_Int:
6987 case X86::VCVTSS2SDrr:
6988 case X86::VCVTSS2SDrm:
6989 case X86::VCVTSS2SDrr_Int:
6990 case X86::VCVTSS2SDrm_Int:
6992 case X86::VRCPSSr_Int:
6994 case X86::VRCPSSm_Int:
6995 case X86::VROUNDSDri:
6996 case X86::VROUNDSDmi:
6997 case X86::VROUNDSDri_Int:
6998 case X86::VROUNDSDmi_Int:
6999 case X86::VROUNDSSri:
7000 case X86::VROUNDSSmi:
7001 case X86::VROUNDSSri_Int:
7002 case X86::VROUNDSSmi_Int:
7003 case X86::VRSQRTSSr:
7004 case X86::VRSQRTSSr_Int:
7005 case X86::VRSQRTSSm:
7006 case X86::VRSQRTSSm_Int:
7008 case X86::VSQRTSSr_Int:
7010 case X86::VSQRTSSm_Int:
7012 case X86::VSQRTSDr_Int:
7014 case X86::VSQRTSDm_Int:
7016 case X86::VCVTSD2SSZrr:
7017 case X86::VCVTSD2SSZrr_Int:
7018 case X86::VCVTSD2SSZrrb_Int:
7019 case X86::VCVTSD2SSZrm:
7020 case X86::VCVTSD2SSZrm_Int:
7021 case X86::VCVTSS2SDZrr:
7022 case X86::VCVTSS2SDZrr_Int:
7023 case X86::VCVTSS2SDZrrb_Int:
7024 case X86::VCVTSS2SDZrm:
7025 case X86::VCVTSS2SDZrm_Int:
7026 case X86::VGETEXPSDZr:
7027 case X86::VGETEXPSDZrb:
7028 case X86::VGETEXPSDZm:
7029 case X86::VGETEXPSSZr:
7030 case X86::VGETEXPSSZrb:
7031 case X86::VGETEXPSSZm:
7032 case X86::VGETMANTSDZrri:
7033 case X86::VGETMANTSDZrrib:
7034 case X86::VGETMANTSDZrmi:
7035 case X86::VGETMANTSSZrri:
7036 case X86::VGETMANTSSZrrib:
7037 case X86::VGETMANTSSZrmi:
7038 case X86::VRNDSCALESDZrri:
7039 case X86::VRNDSCALESDZrri_Int:
7040 case X86::VRNDSCALESDZrrib_Int:
7041 case X86::VRNDSCALESDZrmi:
7042 case X86::VRNDSCALESDZrmi_Int:
7043 case X86::VRNDSCALESSZrri:
7044 case X86::VRNDSCALESSZrri_Int:
7045 case X86::VRNDSCALESSZrrib_Int:
7046 case X86::VRNDSCALESSZrmi:
7047 case X86::VRNDSCALESSZrmi_Int:
7048 case X86::VRCP14SDZrr:
7049 case X86::VRCP14SDZrm:
7050 case X86::VRCP14SSZrr:
7051 case X86::VRCP14SSZrm:
7052 case X86::VRCPSHZrr:
7053 case X86::VRCPSHZrm:
7054 case X86::VRSQRTSHZrr:
7055 case X86::VRSQRTSHZrm:
7056 case X86::VREDUCESHZrmi:
7057 case X86::VREDUCESHZrri:
7058 case X86::VREDUCESHZrrib:
7059 case X86::VGETEXPSHZr:
7060 case X86::VGETEXPSHZrb:
7061 case X86::VGETEXPSHZm:
7062 case X86::VGETMANTSHZrri:
7063 case X86::VGETMANTSHZrrib:
7064 case X86::VGETMANTSHZrmi:
7065 case X86::VRNDSCALESHZrri:
7066 case X86::VRNDSCALESHZrri_Int:
7067 case X86::VRNDSCALESHZrrib_Int:
7068 case X86::VRNDSCALESHZrmi:
7069 case X86::VRNDSCALESHZrmi_Int:
7070 case X86::VSQRTSHZr:
7071 case X86::VSQRTSHZr_Int:
7072 case X86::VSQRTSHZrb_Int:
7073 case X86::VSQRTSHZm:
7074 case X86::VSQRTSHZm_Int:
7075 case X86::VRCP28SDZr:
7076 case X86::VRCP28SDZrb:
7077 case X86::VRCP28SDZm:
7078 case X86::VRCP28SSZr:
7079 case X86::VRCP28SSZrb:
7080 case X86::VRCP28SSZm:
7081 case X86::VREDUCESSZrmi:
7082 case X86::VREDUCESSZrri:
7083 case X86::VREDUCESSZrrib:
7084 case X86::VRSQRT14SDZrr:
7085 case X86::VRSQRT14SDZrm:
7086 case X86::VRSQRT14SSZrr:
7087 case X86::VRSQRT14SSZrm:
7088 case X86::VRSQRT28SDZr:
7089 case X86::VRSQRT28SDZrb:
7090 case X86::VRSQRT28SDZm:
7091 case X86::VRSQRT28SSZr:
7092 case X86::VRSQRT28SSZrb:
7093 case X86::VRSQRT28SSZm:
7094 case X86::VSQRTSSZr:
7095 case X86::VSQRTSSZr_Int:
7096 case X86::VSQRTSSZrb_Int:
7097 case X86::VSQRTSSZm:
7098 case X86::VSQRTSSZm_Int:
7099 case X86::VSQRTSDZr:
7100 case X86::VSQRTSDZr_Int:
7101 case X86::VSQRTSDZrb_Int:
7102 case X86::VSQRTSDZm:
7103 case X86::VSQRTSDZm_Int:
7104 case X86::VCVTSD2SHZrr:
7105 case X86::VCVTSD2SHZrr_Int:
7106 case X86::VCVTSD2SHZrrb_Int:
7107 case X86::VCVTSD2SHZrm:
7108 case X86::VCVTSD2SHZrm_Int:
7109 case X86::VCVTSS2SHZrr:
7110 case X86::VCVTSS2SHZrr_Int:
7111 case X86::VCVTSS2SHZrrb_Int:
7112 case X86::VCVTSS2SHZrm:
7113 case X86::VCVTSS2SHZrm_Int:
7114 case X86::VCVTSH2SDZrr:
7115 case X86::VCVTSH2SDZrr_Int:
7116 case X86::VCVTSH2SDZrrb_Int:
7117 case X86::VCVTSH2SDZrm:
7118 case X86::VCVTSH2SDZrm_Int:
7119 case X86::VCVTSH2SSZrr:
7120 case X86::VCVTSH2SSZrr_Int:
7121 case X86::VCVTSH2SSZrrb_Int:
7122 case X86::VCVTSH2SSZrm:
7123 case X86::VCVTSH2SSZrm_Int:
7125 case X86::VMOVSSZrrk:
7126 case X86::VMOVSDZrrk:
7127 return OpNum == 3 && !ForLoadFold;
7128 case X86::VMOVSSZrrkz:
7129 case X86::VMOVSDZrrkz:
7130 return OpNum == 2 && !ForLoadFold;
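// The code below breaks such a partial/false register dependency by clearing
// the offending register first: XORPS/VPXORD for XMM-class registers (using
// only the low 128 bits of wider registers) and a 32-bit XOR for GPRs, after
// which the register is marked as killed on the instruction.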
  Register Reg = MI.getOperand(OpNum).getReg();
  if (MI.killsRegister(Reg, TRI))
    return;
  if (X86::VR128RegClass.contains(Reg)) {
    unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::VR256RegClass.contains(Reg)) {
    Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::VR128XRegClass.contains(Reg)) {
    if (!Subtarget.hasVLX())
      return;
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), Reg)
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::VR256XRegClass.contains(Reg) ||
             X86::VR512RegClass.contains(Reg)) {
    if (!Subtarget.hasVLX())
      return;
    Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), XReg)
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::GR64RegClass.contains(Reg)) {
    Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::GR32RegClass.contains(Reg)) {
    MI.addRegisterKilled(Reg, TRI, true);
  } else if ((X86::GR16RegClass.contains(Reg) ||
    if (!MI.definesRegister(SuperReg, nullptr))
                        int PtrOffset = 0) {
  unsigned NumAddrOps = MOs.size();
  if (NumAddrOps < 4) {
    for (unsigned i = 0; i != NumAddrOps; ++i)
    assert(MOs.size() == 5 && "Unexpected memory operand list length");
    for (unsigned i = 0; i != NumAddrOps; ++i) {
      if (i == 3 && PtrOffset != 0) {
  if (!Reg.isVirtual())
    dbgs() << "WARNING: Unable to update register constraint for operand "
           << Idx << " of instruction:\n";
      MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
  unsigned NumOps = MI.getDesc().getNumOperands() - 2;
  for (unsigned i = 0; i != NumOps; ++i) {
  MBB->insert(InsertPt, NewMI);
                              int PtrOffset = 0) {
      MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    assert(MO.isReg() && "Expected to fold into reg operand!");
  MBB->insert(InsertPt, NewMI);
                  MI.getDebugLoc(), TII.get(Opcode));
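// Custom memory-operand folding cases follow: instructions such as INSERTPS,
// MOVHLPS and UNPCKLPD can only fold a load by switching to a different
// opcode and, for INSERTPS, by folding a narrower load at a byte offset into
// the spill slot, so they are handled here rather than via the fold tables.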
  switch (MI.getOpcode()) {
  case X86::INSERTPSrri:
  case X86::VINSERTPSrri:
  case X86::VINSERTPSZrri: {
    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
    unsigned ZMask = Imm & 15;
    unsigned DstIdx = (Imm >> 4) & 3;
    unsigned SrcIdx = (Imm >> 6) & 3;
    const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
    unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
    if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
        (MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
      int PtrOffset = SrcIdx * 4;
      unsigned NewImm = (DstIdx << 4) | ZMask;
      unsigned NewOpCode =
          (MI.getOpcode() == X86::VINSERTPSZrri)  ? X86::VINSERTPSZrmi
          : (MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
                                                  : X86::INSERTPSrmi;
      MachineInstr *NewMI =
          fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
  case X86::MOVHLPSrr:
  case X86::VMOVHLPSrr:
  case X86::VMOVHLPSZrr: {
    const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
    unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
    if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
      unsigned NewOpCode =
          (MI.getOpcode() == X86::VMOVHLPSZrr)  ? X86::VMOVLPSZ128rm
          : (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm
                                                : X86::MOVLPSrm;
      MachineInstr *NewMI =
          fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
  case X86::UNPCKLPDrr: {
    const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
    unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
    if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
      MachineInstr *NewMI =
          fuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
    makeM0Inst(*this, (Size == 4) ? X86::MOV32mi : X86::MOV64mi32, MOs,
      !MI.getOperand(1).isReg())
  if (MI.getOperand(1).isUndef())
                                                   unsigned Idx1) const {
  unsigned Idx2 = CommuteAnyOperandIndex;
  bool HasDef = MI.getDesc().getNumDefs();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  bool Tied1 = 0 == MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO);
  bool Tied2 = 0 == MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO);
  if ((HasDef && Reg0 == Reg1 && Tied1) || (HasDef && Reg0 == Reg2 && Tied2))
  return commuteInstruction(MI, false, Idx1, Idx2) ? Idx2 : Idx1;

  dbgs() << "We failed to fuse operand " << Idx << " in " << MI;

                                  unsigned Size, Align Alignment,
                                  bool AllowCommute) const {
  bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
  unsigned Opc = MI.getOpcode();
      (Opc == X86::CALL32r || Opc == X86::CALL64r ||
       Opc == X86::CALL64r_ImpCall || Opc == X86::PUSH16r ||
       Opc == X86::PUSH32r || Opc == X86::PUSH64r))
    return nullptr;

  unsigned NumOps = MI.getDesc().getNumOperands();
  bool IsTwoAddr = NumOps > 1 && OpNum < 2 && MI.getOperand(0).isReg() &&
                   MI.getOperand(1).isReg() &&
                   MI.getOperand(0).getReg() == MI.getOperand(1).getReg();
7509 if (
Opc == X86::ADD32ri &&
7518 Opc != X86::ADD64rr)
  if (MI.isCall() && MI.getCFIType())
    return nullptr;

  if (auto *CustomMI = foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt,
  unsigned Opcode = I->DstOp;
  bool NarrowToMOV32rm = false;
    unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
      if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
        return nullptr;
      if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
        return nullptr;
      Opcode = X86::MOV32rm;
      NarrowToMOV32rm = true;
           : fuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
  if (NarrowToMOV32rm) {
    unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
    if (CommuteOpIdx2 == OpNum) {
    commuteInstruction(MI, false, OpNum, CommuteOpIdx2);

  for (auto Op : Ops) {
    if (MI.getOpcode() == X86::MOV32r0 && SubReg == X86::sub_32bit)
  if (!RI.hasStackRealignment(MF))
        std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
                               Size, Alignment, /*AllowCommute=*/true);
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    unsigned RCSize = 0;
    unsigned Opc = MI.getOpcode();
      NewOpc = X86::CMP8ri;
      NewOpc = X86::CMP16ri;
      NewOpc = X86::CMP32ri;
      NewOpc = X86::CMP64ri32;
    MI.setDesc(get(NewOpc));
    MI.getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return nullptr;

  unsigned RegSize = TRI.getRegSizeInBits(*RC);
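// When the value being folded comes from a 32-bit MOVSS load but the operand's
// register class is wider than 32 bits, folding is only permitted into the
// scalar "_Int" instructions enumerated below, since only those are known to
// read just the low 32-bit element; for any other user the fold is rejected
// because it would widen the memory access.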
  if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
       Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
       Opc == X86::VMOVSSZrm_alt) &&
7721 case X86::CVTSS2SDrr_Int:
7722 case X86::VCVTSS2SDrr_Int:
7723 case X86::VCVTSS2SDZrr_Int:
7724 case X86::VCVTSS2SDZrrk_Int:
7725 case X86::VCVTSS2SDZrrkz_Int:
7726 case X86::CVTSS2SIrr_Int:
7727 case X86::CVTSS2SI64rr_Int:
7728 case X86::VCVTSS2SIrr_Int:
7729 case X86::VCVTSS2SI64rr_Int:
7730 case X86::VCVTSS2SIZrr_Int:
7731 case X86::VCVTSS2SI64Zrr_Int:
7732 case X86::CVTTSS2SIrr_Int:
7733 case X86::CVTTSS2SI64rr_Int:
7734 case X86::VCVTTSS2SIrr_Int:
7735 case X86::VCVTTSS2SI64rr_Int:
7736 case X86::VCVTTSS2SIZrr_Int:
7737 case X86::VCVTTSS2SI64Zrr_Int:
7738 case X86::VCVTSS2USIZrr_Int:
7739 case X86::VCVTSS2USI64Zrr_Int:
7740 case X86::VCVTTSS2USIZrr_Int:
7741 case X86::VCVTTSS2USI64Zrr_Int:
7742 case X86::RCPSSr_Int:
7743 case X86::VRCPSSr_Int:
7744 case X86::RSQRTSSr_Int:
7745 case X86::VRSQRTSSr_Int:
7746 case X86::ROUNDSSri_Int:
7747 case X86::VROUNDSSri_Int:
7748 case X86::COMISSrr_Int:
7749 case X86::VCOMISSrr_Int:
7750 case X86::VCOMISSZrr_Int:
7751 case X86::UCOMISSrr_Int:
7752 case X86::VUCOMISSrr_Int:
7753 case X86::VUCOMISSZrr_Int:
7754 case X86::ADDSSrr_Int:
7755 case X86::VADDSSrr_Int:
7756 case X86::VADDSSZrr_Int:
7757 case X86::CMPSSrri_Int:
7758 case X86::VCMPSSrri_Int:
7759 case X86::VCMPSSZrri_Int:
7760 case X86::DIVSSrr_Int:
7761 case X86::VDIVSSrr_Int:
7762 case X86::VDIVSSZrr_Int:
7763 case X86::MAXSSrr_Int:
7764 case X86::VMAXSSrr_Int:
7765 case X86::VMAXSSZrr_Int:
7766 case X86::MINSSrr_Int:
7767 case X86::VMINSSrr_Int:
7768 case X86::VMINSSZrr_Int:
7769 case X86::MULSSrr_Int:
7770 case X86::VMULSSrr_Int:
7771 case X86::VMULSSZrr_Int:
7772 case X86::SQRTSSr_Int:
7773 case X86::VSQRTSSr_Int:
7774 case X86::VSQRTSSZr_Int:
7775 case X86::SUBSSrr_Int:
7776 case X86::VSUBSSrr_Int:
7777 case X86::VSUBSSZrr_Int:
7778 case X86::VADDSSZrrk_Int:
7779 case X86::VADDSSZrrkz_Int:
7780 case X86::VCMPSSZrrik_Int:
7781 case X86::VDIVSSZrrk_Int:
7782 case X86::VDIVSSZrrkz_Int:
7783 case X86::VMAXSSZrrk_Int:
7784 case X86::VMAXSSZrrkz_Int:
7785 case X86::VMINSSZrrk_Int:
7786 case X86::VMINSSZrrkz_Int:
7787 case X86::VMULSSZrrk_Int:
7788 case X86::VMULSSZrrkz_Int:
7789 case X86::VSQRTSSZrk_Int:
7790 case X86::VSQRTSSZrkz_Int:
7791 case X86::VSUBSSZrrk_Int:
7792 case X86::VSUBSSZrrkz_Int:
7793 case X86::VFMADDSS4rr_Int:
7794 case X86::VFNMADDSS4rr_Int:
7795 case X86::VFMSUBSS4rr_Int:
7796 case X86::VFNMSUBSS4rr_Int:
7797 case X86::VFMADD132SSr_Int:
7798 case X86::VFNMADD132SSr_Int:
7799 case X86::VFMADD213SSr_Int:
7800 case X86::VFNMADD213SSr_Int:
7801 case X86::VFMADD231SSr_Int:
7802 case X86::VFNMADD231SSr_Int:
7803 case X86::VFMSUB132SSr_Int:
7804 case X86::VFNMSUB132SSr_Int:
7805 case X86::VFMSUB213SSr_Int:
7806 case X86::VFNMSUB213SSr_Int:
7807 case X86::VFMSUB231SSr_Int:
7808 case X86::VFNMSUB231SSr_Int:
7809 case X86::VFMADD132SSZr_Int:
7810 case X86::VFNMADD132SSZr_Int:
7811 case X86::VFMADD213SSZr_Int:
7812 case X86::VFNMADD213SSZr_Int:
7813 case X86::VFMADD231SSZr_Int:
7814 case X86::VFNMADD231SSZr_Int:
7815 case X86::VFMSUB132SSZr_Int:
7816 case X86::VFNMSUB132SSZr_Int:
7817 case X86::VFMSUB213SSZr_Int:
7818 case X86::VFNMSUB213SSZr_Int:
7819 case X86::VFMSUB231SSZr_Int:
7820 case X86::VFNMSUB231SSZr_Int:
7821 case X86::VFMADD132SSZrk_Int:
7822 case X86::VFNMADD132SSZrk_Int:
7823 case X86::VFMADD213SSZrk_Int:
7824 case X86::VFNMADD213SSZrk_Int:
7825 case X86::VFMADD231SSZrk_Int:
7826 case X86::VFNMADD231SSZrk_Int:
7827 case X86::VFMSUB132SSZrk_Int:
7828 case X86::VFNMSUB132SSZrk_Int:
7829 case X86::VFMSUB213SSZrk_Int:
7830 case X86::VFNMSUB213SSZrk_Int:
7831 case X86::VFMSUB231SSZrk_Int:
7832 case X86::VFNMSUB231SSZrk_Int:
7833 case X86::VFMADD132SSZrkz_Int:
7834 case X86::VFNMADD132SSZrkz_Int:
7835 case X86::VFMADD213SSZrkz_Int:
7836 case X86::VFNMADD213SSZrkz_Int:
7837 case X86::VFMADD231SSZrkz_Int:
7838 case X86::VFNMADD231SSZrkz_Int:
7839 case X86::VFMSUB132SSZrkz_Int:
7840 case X86::VFNMSUB132SSZrkz_Int:
7841 case X86::VFMSUB213SSZrkz_Int:
7842 case X86::VFNMSUB213SSZrkz_Int:
7843 case X86::VFMSUB231SSZrkz_Int:
7844 case X86::VFNMSUB231SSZrkz_Int:
7845 case X86::VFIXUPIMMSSZrri:
7846 case X86::VFIXUPIMMSSZrrik:
7847 case X86::VFIXUPIMMSSZrrikz:
7848 case X86::VFPCLASSSSZri:
7849 case X86::VFPCLASSSSZrik:
7850 case X86::VGETEXPSSZr:
7851 case X86::VGETEXPSSZrk:
7852 case X86::VGETEXPSSZrkz:
7853 case X86::VGETMANTSSZrri:
7854 case X86::VGETMANTSSZrrik:
7855 case X86::VGETMANTSSZrrikz:
7856 case X86::VRANGESSZrri:
7857 case X86::VRANGESSZrrik:
7858 case X86::VRANGESSZrrikz:
7859 case X86::VRCP14SSZrr:
7860 case X86::VRCP14SSZrrk:
7861 case X86::VRCP14SSZrrkz:
7862 case X86::VRCP28SSZr:
7863 case X86::VRCP28SSZrk:
7864 case X86::VRCP28SSZrkz:
7865 case X86::VREDUCESSZrri:
7866 case X86::VREDUCESSZrrik:
7867 case X86::VREDUCESSZrrikz:
7868 case X86::VRNDSCALESSZrri_Int:
7869 case X86::VRNDSCALESSZrrik_Int:
7870 case X86::VRNDSCALESSZrrikz_Int:
7871 case X86::VRSQRT14SSZrr:
7872 case X86::VRSQRT14SSZrrk:
7873 case X86::VRSQRT14SSZrrkz:
7874 case X86::VRSQRT28SSZr:
7875 case X86::VRSQRT28SSZrk:
7876 case X86::VRSQRT28SSZrkz:
7877 case X86::VSCALEFSSZrr:
7878 case X86::VSCALEFSSZrrk:
7879 case X86::VSCALEFSSZrrkz:
7886 if ((
Opc == X86::MOVSDrm ||
Opc == X86::VMOVSDrm ||
Opc == X86::VMOVSDZrm ||
7887 Opc == X86::MOVSDrm_alt ||
Opc == X86::VMOVSDrm_alt ||
7888 Opc == X86::VMOVSDZrm_alt) &&
7894 case X86::CVTSD2SSrr_Int:
7895 case X86::VCVTSD2SSrr_Int:
7896 case X86::VCVTSD2SSZrr_Int:
7897 case X86::VCVTSD2SSZrrk_Int:
7898 case X86::VCVTSD2SSZrrkz_Int:
7899 case X86::CVTSD2SIrr_Int:
7900 case X86::CVTSD2SI64rr_Int:
7901 case X86::VCVTSD2SIrr_Int:
7902 case X86::VCVTSD2SI64rr_Int:
7903 case X86::VCVTSD2SIZrr_Int:
7904 case X86::VCVTSD2SI64Zrr_Int:
7905 case X86::CVTTSD2SIrr_Int:
7906 case X86::CVTTSD2SI64rr_Int:
7907 case X86::VCVTTSD2SIrr_Int:
7908 case X86::VCVTTSD2SI64rr_Int:
7909 case X86::VCVTTSD2SIZrr_Int:
7910 case X86::VCVTTSD2SI64Zrr_Int:
7911 case X86::VCVTSD2USIZrr_Int:
7912 case X86::VCVTSD2USI64Zrr_Int:
7913 case X86::VCVTTSD2USIZrr_Int:
7914 case X86::VCVTTSD2USI64Zrr_Int:
7915 case X86::ROUNDSDri_Int:
7916 case X86::VROUNDSDri_Int:
7917 case X86::COMISDrr_Int:
7918 case X86::VCOMISDrr_Int:
7919 case X86::VCOMISDZrr_Int:
7920 case X86::UCOMISDrr_Int:
7921 case X86::VUCOMISDrr_Int:
7922 case X86::VUCOMISDZrr_Int:
7923 case X86::ADDSDrr_Int:
7924 case X86::VADDSDrr_Int:
7925 case X86::VADDSDZrr_Int:
7926 case X86::CMPSDrri_Int:
7927 case X86::VCMPSDrri_Int:
7928 case X86::VCMPSDZrri_Int:
7929 case X86::DIVSDrr_Int:
7930 case X86::VDIVSDrr_Int:
7931 case X86::VDIVSDZrr_Int:
7932 case X86::MAXSDrr_Int:
7933 case X86::VMAXSDrr_Int:
7934 case X86::VMAXSDZrr_Int:
7935 case X86::MINSDrr_Int:
7936 case X86::VMINSDrr_Int:
7937 case X86::VMINSDZrr_Int:
7938 case X86::MULSDrr_Int:
7939 case X86::VMULSDrr_Int:
7940 case X86::VMULSDZrr_Int:
7941 case X86::SQRTSDr_Int:
7942 case X86::VSQRTSDr_Int:
7943 case X86::VSQRTSDZr_Int:
7944 case X86::SUBSDrr_Int:
7945 case X86::VSUBSDrr_Int:
7946 case X86::VSUBSDZrr_Int:
7947 case X86::VADDSDZrrk_Int:
7948 case X86::VADDSDZrrkz_Int:
7949 case X86::VCMPSDZrrik_Int:
7950 case X86::VDIVSDZrrk_Int:
7951 case X86::VDIVSDZrrkz_Int:
7952 case X86::VMAXSDZrrk_Int:
7953 case X86::VMAXSDZrrkz_Int:
7954 case X86::VMINSDZrrk_Int:
7955 case X86::VMINSDZrrkz_Int:
7956 case X86::VMULSDZrrk_Int:
7957 case X86::VMULSDZrrkz_Int:
7958 case X86::VSQRTSDZrk_Int:
7959 case X86::VSQRTSDZrkz_Int:
7960 case X86::VSUBSDZrrk_Int:
7961 case X86::VSUBSDZrrkz_Int:
7962 case X86::VFMADDSD4rr_Int:
7963 case X86::VFNMADDSD4rr_Int:
7964 case X86::VFMSUBSD4rr_Int:
7965 case X86::VFNMSUBSD4rr_Int:
7966 case X86::VFMADD132SDr_Int:
7967 case X86::VFNMADD132SDr_Int:
7968 case X86::VFMADD213SDr_Int:
7969 case X86::VFNMADD213SDr_Int:
7970 case X86::VFMADD231SDr_Int:
7971 case X86::VFNMADD231SDr_Int:
7972 case X86::VFMSUB132SDr_Int:
7973 case X86::VFNMSUB132SDr_Int:
7974 case X86::VFMSUB213SDr_Int:
7975 case X86::VFNMSUB213SDr_Int:
7976 case X86::VFMSUB231SDr_Int:
7977 case X86::VFNMSUB231SDr_Int:
7978 case X86::VFMADD132SDZr_Int:
7979 case X86::VFNMADD132SDZr_Int:
7980 case X86::VFMADD213SDZr_Int:
7981 case X86::VFNMADD213SDZr_Int:
7982 case X86::VFMADD231SDZr_Int:
7983 case X86::VFNMADD231SDZr_Int:
7984 case X86::VFMSUB132SDZr_Int:
7985 case X86::VFNMSUB132SDZr_Int:
7986 case X86::VFMSUB213SDZr_Int:
7987 case X86::VFNMSUB213SDZr_Int:
7988 case X86::VFMSUB231SDZr_Int:
7989 case X86::VFNMSUB231SDZr_Int:
7990 case X86::VFMADD132SDZrk_Int:
7991 case X86::VFNMADD132SDZrk_Int:
7992 case X86::VFMADD213SDZrk_Int:
7993 case X86::VFNMADD213SDZrk_Int:
7994 case X86::VFMADD231SDZrk_Int:
7995 case X86::VFNMADD231SDZrk_Int:
7996 case X86::VFMSUB132SDZrk_Int:
7997 case X86::VFNMSUB132SDZrk_Int:
7998 case X86::VFMSUB213SDZrk_Int:
7999 case X86::VFNMSUB213SDZrk_Int:
8000 case X86::VFMSUB231SDZrk_Int:
8001 case X86::VFNMSUB231SDZrk_Int:
8002 case X86::VFMADD132SDZrkz_Int:
8003 case X86::VFNMADD132SDZrkz_Int:
8004 case X86::VFMADD213SDZrkz_Int:
8005 case X86::VFNMADD213SDZrkz_Int:
8006 case X86::VFMADD231SDZrkz_Int:
8007 case X86::VFNMADD231SDZrkz_Int:
8008 case X86::VFMSUB132SDZrkz_Int:
8009 case X86::VFNMSUB132SDZrkz_Int:
8010 case X86::VFMSUB213SDZrkz_Int:
8011 case X86::VFNMSUB213SDZrkz_Int:
8012 case X86::VFMSUB231SDZrkz_Int:
8013 case X86::VFNMSUB231SDZrkz_Int:
8014 case X86::VFIXUPIMMSDZrri:
8015 case X86::VFIXUPIMMSDZrrik:
8016 case X86::VFIXUPIMMSDZrrikz:
8017 case X86::VFPCLASSSDZri:
8018 case X86::VFPCLASSSDZrik:
8019 case X86::VGETEXPSDZr:
8020 case X86::VGETEXPSDZrk:
8021 case X86::VGETEXPSDZrkz:
8022 case X86::VGETMANTSDZrri:
8023 case X86::VGETMANTSDZrrik:
8024 case X86::VGETMANTSDZrrikz:
8025 case X86::VRANGESDZrri:
8026 case X86::VRANGESDZrrik:
8027 case X86::VRANGESDZrrikz:
8028 case X86::VRCP14SDZrr:
8029 case X86::VRCP14SDZrrk:
8030 case X86::VRCP14SDZrrkz:
8031 case X86::VRCP28SDZr:
8032 case X86::VRCP28SDZrk:
8033 case X86::VRCP28SDZrkz:
8034 case X86::VREDUCESDZrri:
8035 case X86::VREDUCESDZrrik:
8036 case X86::VREDUCESDZrrikz:
8037 case X86::VRNDSCALESDZrri_Int:
8038 case X86::VRNDSCALESDZrrik_Int:
8039 case X86::VRNDSCALESDZrrikz_Int:
8040 case X86::VRSQRT14SDZrr:
8041 case X86::VRSQRT14SDZrrk:
8042 case X86::VRSQRT14SDZrrkz:
8043 case X86::VRSQRT28SDZr:
8044 case X86::VRSQRT28SDZrk:
8045 case X86::VRSQRT28SDZrkz:
8046 case X86::VSCALEFSDZrr:
8047 case X86::VSCALEFSDZrrk:
8048 case X86::VSCALEFSDZrrkz:
8055 if ((
Opc == X86::VMOVSHZrm ||
Opc == X86::VMOVSHZrm_alt) &&
RegSize > 16) {
8060 case X86::VADDSHZrr_Int:
8061 case X86::VCMPSHZrri_Int:
8062 case X86::VDIVSHZrr_Int:
8063 case X86::VMAXSHZrr_Int:
8064 case X86::VMINSHZrr_Int:
8065 case X86::VMULSHZrr_Int:
8066 case X86::VSUBSHZrr_Int:
8067 case X86::VADDSHZrrk_Int:
8068 case X86::VADDSHZrrkz_Int:
8069 case X86::VCMPSHZrrik_Int:
8070 case X86::VDIVSHZrrk_Int:
8071 case X86::VDIVSHZrrkz_Int:
8072 case X86::VMAXSHZrrk_Int:
8073 case X86::VMAXSHZrrkz_Int:
8074 case X86::VMINSHZrrk_Int:
8075 case X86::VMINSHZrrkz_Int:
8076 case X86::VMULSHZrrk_Int:
8077 case X86::VMULSHZrrkz_Int:
8078 case X86::VSUBSHZrrk_Int:
8079 case X86::VSUBSHZrrkz_Int:
8080 case X86::VFMADD132SHZr_Int:
8081 case X86::VFNMADD132SHZr_Int:
8082 case X86::VFMADD213SHZr_Int:
8083 case X86::VFNMADD213SHZr_Int:
8084 case X86::VFMADD231SHZr_Int:
8085 case X86::VFNMADD231SHZr_Int:
8086 case X86::VFMSUB132SHZr_Int:
8087 case X86::VFNMSUB132SHZr_Int:
8088 case X86::VFMSUB213SHZr_Int:
8089 case X86::VFNMSUB213SHZr_Int:
8090 case X86::VFMSUB231SHZr_Int:
8091 case X86::VFNMSUB231SHZr_Int:
8092 case X86::VFMADD132SHZrk_Int:
8093 case X86::VFNMADD132SHZrk_Int:
8094 case X86::VFMADD213SHZrk_Int:
8095 case X86::VFNMADD213SHZrk_Int:
8096 case X86::VFMADD231SHZrk_Int:
8097 case X86::VFNMADD231SHZrk_Int:
8098 case X86::VFMSUB132SHZrk_Int:
8099 case X86::VFNMSUB132SHZrk_Int:
8100 case X86::VFMSUB213SHZrk_Int:
8101 case X86::VFNMSUB213SHZrk_Int:
8102 case X86::VFMSUB231SHZrk_Int:
8103 case X86::VFNMSUB231SHZrk_Int:
8104 case X86::VFMADD132SHZrkz_Int:
8105 case X86::VFNMADD132SHZrkz_Int:
8106 case X86::VFMADD213SHZrkz_Int:
8107 case X86::VFNMADD213SHZrkz_Int:
8108 case X86::VFMADD231SHZrkz_Int:
8109 case X86::VFNMADD231SHZrkz_Int:
8110 case X86::VFMSUB132SHZrkz_Int:
8111 case X86::VFNMSUB132SHZrkz_Int:
8112 case X86::VFMSUB213SHZrkz_Int:
8113 case X86::VFNMSUB213SHZrkz_Int:
8114 case X86::VFMSUB231SHZrkz_Int:
8115 case X86::VFNMSUB231SHZrkz_Int:
8139 return RC == &X86::VK2WMRegClass || RC == &X86::VK4WMRegClass ||
8140 RC == &X86::VK8WMRegClass || RC == &X86::VK16WMRegClass ||
8141 RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass;
8150 bool HasSameMask = false;
8151 for (unsigned I = 1, E = MI.getDesc().getNumOperands(); I < E; ++I) {
8153 if (Op.isReg() && Op.getReg() == MaskReg) {
8165 for (auto Op : Ops) {
8166 if (MI.getOperand(Op).getSubReg())
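// Note: operands that carry a sub-register index appear to be rejected for
// folding here, since the memory-fold tables only describe whole-register
// operand forms.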
8203 case X86::AVX512_512_SET0:
8204 case X86::AVX512_512_SETALLONES:
8205 Alignment = Align(64);
8207 case X86::AVX2_SETALLONES:
8208 case X86::AVX1_SETALLONES:
8210 case X86::AVX512_256_SET0:
8211 case X86::AVX512_256_SETALLONES:
8212 Alignment = Align(32);
8215 case X86::V_SETALLONES:
8216 case X86::AVX512_128_SET0:
8217 case X86::FsFLD0F128:
8218 case X86::AVX512_FsFLD0F128:
8219 case X86::AVX512_128_SETALLONES:
8220 Alignment = Align(16);
8224 case X86::AVX512_FsFLD0SD:
8225 Alignment = Align(8);
8228 case X86::AVX512_FsFLD0SS:
8229 Alignment = Align(4);
8232 case X86::AVX512_FsFLD0SH:
8233 Alignment = Align(2);
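// Note: these SET0/SETALLONES/FsFLD0* pseudos are folded by loading the
// constant from the constant pool, so the required alignment tracks the width
// of the constant: 64 bytes for 512-bit patterns down to 2 bytes for a
// half-precision zero.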
8238 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
8239 unsigned NewOpc = 0;
8240 switch (MI.getOpcode()) {
8244 NewOpc = X86::CMP8ri;
8247 NewOpc = X86::CMP16ri;
8250 NewOpc = X86::CMP32ri;
8253 NewOpc = X86::CMP64ri32;
8257 MI.setDesc(get(NewOpc));
8258 MI.getOperand(1).ChangeToImmediate(0);
8259 } else if (Ops.size() != 1)
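// Note: when both register operands fold to the same spill slot, the
// compare-like instruction is first rewritten into its register/immediate
// form against 0 (the matching case labels are elided in this listing), so a
// single register operand remains for the memory fold.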
8271 case X86::V_SETALLONES:
8272 case X86::AVX2_SETALLONES:
8273 case X86::AVX1_SETALLONES:
8275 case X86::AVX512_128_SET0:
8276 case X86::AVX512_256_SET0:
8277 case X86::AVX512_512_SET0:
8278 case X86::AVX512_128_SETALLONES:
8279 case X86::AVX512_256_SETALLONES:
8280 case X86::AVX512_512_SETALLONES:
8282 case X86::AVX512_FsFLD0SH:
8284 case X86::AVX512_FsFLD0SD:
8286 case X86::AVX512_FsFLD0SS:
8287 case X86::FsFLD0F128:
8288 case X86::AVX512_FsFLD0F128: {
8297 unsigned PICBase = 0;
8300 if (Subtarget.is64Bit()) {
8313 bool IsAllOnes = false;
8316 case X86::AVX512_FsFLD0SS:
8320 case X86::AVX512_FsFLD0SD:
8323 case X86::FsFLD0F128:
8324 case X86::AVX512_FsFLD0F128:
8328 case X86::AVX512_FsFLD0SH:
8331 case X86::AVX512_512_SETALLONES:
8334 case X86::AVX512_512_SET0:
8338 case X86::AVX1_SETALLONES:
8339 case X86::AVX2_SETALLONES:
8340 case X86::AVX512_256_SETALLONES:
8343 case X86::AVX512_256_SET0:
8353 case X86::V_SETALLONES:
8354 case X86::AVX512_128_SETALLONES:
8358 case X86::AVX512_128_SET0:
8376 case X86::VPBROADCASTBZ128rm:
8377 case X86::VPBROADCASTBZ256rm:
8378 case X86::VPBROADCASTBZrm:
8379 case X86::VBROADCASTF32X2Z256rm:
8380 case X86::VBROADCASTF32X2Zrm:
8381 case X86::VBROADCASTI32X2Z128rm:
8382 case X86::VBROADCASTI32X2Z256rm:
8383 case X86::VBROADCASTI32X2Zrm:
8387#define FOLD_BROADCAST(SIZE) \
8388 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, \
8389 LoadMI.operands_begin() + NumOps); \
8390 return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, SIZE, \
8392 case X86::VPBROADCASTWZ128rm:
8393 case X86::VPBROADCASTWZ256rm:
8394 case X86::VPBROADCASTWZrm:
8396 case X86::VPBROADCASTDZ128rm:
8397 case X86::VPBROADCASTDZ256rm:
8398 case X86::VPBROADCASTDZrm:
8399 case X86::VBROADCASTSSZ128rm:
8400 case X86::VBROADCASTSSZ256rm:
8401 case X86::VBROADCASTSSZrm:
8403 case X86::VPBROADCASTQZ128rm:
8404 case X86::VPBROADCASTQZ256rm:
8405 case X86::VPBROADCASTQZrm:
8406 case X86::VBROADCASTSDZ256rm:
8407 case X86::VBROADCASTSDZrm:
8420 0, Alignment, true);
8427 unsigned BitsSize, bool AllowCommute) const {
8431 ? fuseInst(MF, I->DstOp, OpNum, MOs, InsertPt, MI, *this)
8437 unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
8438 if (CommuteOpIdx2 == OpNum) {
8443 foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs, InsertPt, BitsSize,
8448 commuteInstruction(MI, false, OpNum, CommuteOpIdx2);
8463 if (!MMO->isStore()) {
8481 if (!MMO->isStore())
8484 if (!MMO->isLoad()) {
8502 assert((SpillSize == 64 || STI.hasVLX()) &&
8503 "Can't broadcast less than 64 bytes without AVX512VL!");
8505#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64) \
8507 switch (SpillSize) { \
8509 llvm_unreachable("Unknown spill size"); \
8543 unsigned Opc = I->DstOp;
8547 if (UnfoldLoad && !FoldedLoad)
8549 UnfoldLoad &= FoldedLoad;
8550 if (UnfoldStore && !FoldedStore)
8552 UnfoldStore &= FoldedStore;
8559 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
8560 Subtarget.isUnalignedMem16Slow())
8569 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
8573 else if (Op.isReg() && Op.isImplicit())
8589 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8590 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
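// Note: the memory access is treated as aligned only if its memory operands
// guarantee at least the spill alignment of RC (minimum 16 bytes); that
// decision selects between the aligned and unaligned vector move opcodes.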
8634 case X86::CMP64ri32:
8645 case X86::CMP64ri32:
8646 NewOpc = X86::TEST64rr;
8649 NewOpc = X86::TEST32rr;
8652 NewOpc = X86::TEST16rr;
8655 NewOpc = X86::TEST8rr;
8669 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
8670 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8686 if (!N->isMachineOpcode())
8692 unsigned Opc = I->DstOp;
8700 unsigned NumDefs = MCID.NumDefs;
8701 std::vector<SDValue> AddrOps;
8702 std::vector<SDValue> BeforeOps;
8703 std::vector<SDValue> AfterOps;
8705 unsigned NumOps = N->getNumOperands();
8706 for (unsigned i = 0; i != NumOps - 1; ++i) {
8709 AddrOps.push_back(Op);
8710 else if (i < Index - NumDefs)
8711 BeforeOps.push_back(Op);
8712 else if (i > Index - NumDefs)
8713 AfterOps.push_back(Op);
8716 AddrOps.push_back(Chain);
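// Note: the folded node's operands are partitioned into the memory address
// operands (AddrOps), the operands that precede the folded operand index
// (BeforeOps), and those that follow it (AfterOps), so the standalone load
// and the register-form instruction can be rebuilt around it.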
8721 EVT VT = *TRI.legalclasstypes_begin(*RC);
8723 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8724 Subtarget.isUnalignedMem16Slow())
8734 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8735 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8747 std::vector<EVT> VTs;
8749 if (MCID.getNumDefs() > 0) {
8751 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
8753 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
8754 EVT VT = N->getValueType(i);
8755 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
8759 BeforeOps.push_back(SDValue(Load, 0));
8765 case X86::CMP64ri32:
8773 case X86::CMP64ri32:
8774 Opc = X86::TEST64rr;
8777 Opc = X86::TEST32rr;
8780 Opc = X86::TEST16rr;
8786 BeforeOps[1] = BeforeOps[0];
8795 AddrOps.push_back(SDValue(NewNode, 0));
8796 AddrOps.push_back(Chain);
8798 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8799 Subtarget.isUnalignedMem16Slow())
8804 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8805 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8808 dl, MVT::Other, AddrOps);
8821 unsigned *LoadRegIndex) const {
8827 if (UnfoldLoad && !FoldedLoad)
8829 if (UnfoldStore && !FoldedStore)
8838 int64_t &Offset2) const {
8842 auto IsLoadOpcode = [&](unsigned Opcode) {
8854 case X86::MOVSSrm_alt:
8856 case X86::MOVSDrm_alt:
8857 case X86::MMX_MOVD64rm:
8858 case X86::MMX_MOVQ64rm:
8867 case X86::VMOVSSrm_alt:
8869 case X86::VMOVSDrm_alt:
8870 case X86::VMOVAPSrm:
8871 case X86::VMOVUPSrm:
8872 case X86::VMOVAPDrm:
8873 case X86::VMOVUPDrm:
8874 case X86::VMOVDQArm:
8875 case X86::VMOVDQUrm:
8876 case X86::VMOVAPSYrm:
8877 case X86::VMOVUPSYrm:
8878 case X86::VMOVAPDYrm:
8879 case X86::VMOVUPDYrm:
8880 case X86::VMOVDQAYrm:
8881 case X86::VMOVDQUYrm:
8883 case X86::VMOVSSZrm:
8884 case X86::VMOVSSZrm_alt:
8885 case X86::VMOVSDZrm:
8886 case X86::VMOVSDZrm_alt:
8887 case X86::VMOVAPSZ128rm:
8888 case X86::VMOVUPSZ128rm:
8889 case X86::VMOVAPSZ128rm_NOVLX:
8890 case X86::VMOVUPSZ128rm_NOVLX:
8891 case X86::VMOVAPDZ128rm:
8892 case X86::VMOVUPDZ128rm:
8893 case X86::VMOVDQU8Z128rm:
8894 case X86::VMOVDQU16Z128rm:
8895 case X86::VMOVDQA32Z128rm:
8896 case X86::VMOVDQU32Z128rm:
8897 case X86::VMOVDQA64Z128rm:
8898 case X86::VMOVDQU64Z128rm:
8899 case X86::VMOVAPSZ256rm:
8900 case X86::VMOVUPSZ256rm:
8901 case X86::VMOVAPSZ256rm_NOVLX:
8902 case X86::VMOVUPSZ256rm_NOVLX:
8903 case X86::VMOVAPDZ256rm:
8904 case X86::VMOVUPDZ256rm:
8905 case X86::VMOVDQU8Z256rm:
8906 case X86::VMOVDQU16Z256rm:
8907 case X86::VMOVDQA32Z256rm:
8908 case X86::VMOVDQU32Z256rm:
8909 case X86::VMOVDQA64Z256rm:
8910 case X86::VMOVDQU64Z256rm:
8911 case X86::VMOVAPSZrm:
8912 case X86::VMOVUPSZrm:
8913 case X86::VMOVAPDZrm:
8914 case X86::VMOVUPDZrm:
8915 case X86::VMOVDQU8Zrm:
8916 case X86::VMOVDQU16Zrm:
8917 case X86::VMOVDQA32Zrm:
8918 case X86::VMOVDQU32Zrm:
8919 case X86::VMOVDQA64Zrm:
8920 case X86::VMOVDQU64Zrm:
8922 case X86::KMOVBkm_EVEX:
8924 case X86::KMOVWkm_EVEX:
8926 case X86::KMOVDkm_EVEX:
8928 case X86::KMOVQkm_EVEX:
8938 auto HasSameOp = [&](int I) {
8954 if (!Disp1 || !Disp2)
8957 Offset1 = Disp1->getSExtValue();
8958 Offset2 = Disp2->getSExtValue();
8963 int64_t Offset1, int64_t Offset2,
8964 unsigned NumLoads) const {
8965 assert(Offset2 > Offset1);
8966 if ((Offset2 - Offset1) / 8 > 64)
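// Note: two loads are only treated as candidates for being scheduled near
// each other when their displacements are within roughly 512 bytes; the
// (Offset2 - Offset1) / 8 > 64 test rejects pairs further apart.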
8980 case X86::MMX_MOVD64rm:
8981 case X86::MMX_MOVQ64rm:
8990 if (Subtarget.is64Bit()) {
8993 } else if (NumLoads) {
9016 unsigned Opcode = MI.getOpcode();
9017 if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
9018 Opcode == X86::PLDTILECFGV)
9031 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
9041 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
9042 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
9043 RC == &X86::RFP80RegClass);
9056 return GlobalBaseReg;
9061 GlobalBaseReg = RegInfo.createVirtualRegister(
9062 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
9064 return GlobalBaseReg;
9072 for (const uint16_t(&Row)[3] : Table)
9073 if (Row[domain - 1] == opcode)
9081 for (const uint16_t(&Row)[4] : Table)
9082 if (Row[domain - 1] == opcode || (domain == 3 && Row[3] == opcode))
9089 unsigned NewWidth, unsigned *pNewMask = nullptr) {
9090 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
9091 "Illegal blend mask scale");
9092 unsigned NewMask = 0;
9094 if ((OldWidth % NewWidth) == 0) {
9095 unsigned Scale = OldWidth / NewWidth;
9096 unsigned SubMask = (1u << Scale) - 1;
9097 for (unsigned i = 0; i != NewWidth; ++i) {
9098 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
9100 NewMask |= (1u << i);
9101 else if (Sub != 0x0)
9105 unsigned Scale = NewWidth / OldWidth;
9106 unsigned SubMask = (1u << Scale) - 1;
9107 for (unsigned i = 0; i != OldWidth; ++i) {
9108 if (OldMask & (1 << i)) {
9109 NewMask |= (SubMask << (i * Scale));
9115 *pNewMask = NewMask;
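// Example: narrowing an 8-lane mask 0b00001111 to 4 lanes (Scale = 2) yields
// 0b0011, because every pair of old bits is either all-ones or all-zeros;
// narrowing 0b00000101 fails since one pair is only partially set. Widening
// 0b01 from 2 lanes to 4 lanes (Scale = 2) yields 0b0011.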
9120 unsigned Opcode = MI.getOpcode();
9121 unsigned NumOperands = MI.getDesc().getNumOperands();
9123 auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
9125 if (MI.getOperand(NumOperands - 1).isImm()) {
9126 unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
9128 validDomains |= 0x2;
9130 validDomains |= 0x4;
9131 if (!Is256 || Subtarget.hasAVX2())
9132 validDomains |= 0x8;
9134 return validDomains;
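// Note: validDomains is a bitmask over execution domains: 0x2 = PackedSingle,
// 0x4 = PackedDouble, 0x8 = PackedInt; the integer-domain form of a 256-bit
// blend requires AVX2.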
9138 case X86::BLENDPDrmi:
9139 case X86::BLENDPDrri:
9140 case X86::VBLENDPDrmi:
9141 case X86::VBLENDPDrri:
9142 return GetBlendDomains(2, false);
9143 case X86::VBLENDPDYrmi:
9144 case X86::VBLENDPDYrri:
9145 return GetBlendDomains(4, true);
9146 case X86::BLENDPSrmi:
9147 case X86::BLENDPSrri:
9148 case X86::VBLENDPSrmi:
9149 case X86::VBLENDPSrri:
9150 case X86::VPBLENDDrmi:
9151 case X86::VPBLENDDrri:
9152 return GetBlendDomains(4, false);
9153 case X86::VBLENDPSYrmi:
9154 case X86::VBLENDPSYrri:
9155 case X86::VPBLENDDYrmi:
9156 case X86::VPBLENDDYrri:
9157 return GetBlendDomains(8, true);
9158 case X86::PBLENDWrmi:
9159 case X86::PBLENDWrri:
9160 case X86::VPBLENDWrmi:
9161 case X86::VPBLENDWrri:
9163 case X86::VPBLENDWYrmi:
9164 case X86::VPBLENDWYrri:
9165 return GetBlendDomains(8, false);
9166 case X86::VPANDDZ128rr:
9167 case X86::VPANDDZ128rm:
9168 case X86::VPANDDZ256rr:
9169 case X86::VPANDDZ256rm:
9170 case X86::VPANDQZ128rr:
9171 case X86::VPANDQZ128rm:
9172 case X86::VPANDQZ256rr:
9173 case X86::VPANDQZ256rm:
9174 case X86::VPANDNDZ128rr:
9175 case X86::VPANDNDZ128rm:
9176 case X86::VPANDNDZ256rr:
9177 case X86::VPANDNDZ256rm:
9178 case X86::VPANDNQZ128rr:
9179 case X86::VPANDNQZ128rm:
9180 case X86::VPANDNQZ256rr:
9181 case X86::VPANDNQZ256rm:
9182 case X86::VPORDZ128rr:
9183 case X86::VPORDZ128rm:
9184 case X86::VPORDZ256rr:
9185 case X86::VPORDZ256rm:
9186 case X86::VPORQZ128rr:
9187 case X86::VPORQZ128rm:
9188 case X86::VPORQZ256rr:
9189 case X86::VPORQZ256rm:
9190 case X86::VPXORDZ128rr:
9191 case X86::VPXORDZ128rm:
9192 case X86::VPXORDZ256rr:
9193 case X86::VPXORDZ256rm:
9194 case X86::VPXORQZ128rr:
9195 case X86::VPXORQZ128rm:
9196 case X86::VPXORQZ256rr:
9197 case X86::VPXORQZ256rm:
9200 if (Subtarget.hasDQI())
9203 if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
9205 if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
9208 if (NumOperands == 3 &&
9209 RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
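// Note: encoding values >= 16 are XMM16-XMM31, which only exist in EVEX
// encodings; without AVX512DQ the floating-point alternatives are VEX-encoded
// and cannot address those registers, so the instruction has to stay in the
// integer domain.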
9214 case X86::MOVHLPSrr:
9221 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9222 MI.getOperand(0).getSubReg() == 0 &&
9223 MI.getOperand(1).getSubReg() == 0 && MI.getOperand(2).getSubReg() == 0)
9226 case X86::SHUFPDrri:
9232#include "X86ReplaceableInstrs.def"
9238 assert(dom && "Not an SSE instruction");
9240 unsigned Opcode = MI.getOpcode();
9241 unsigned NumOperands = MI.getDesc().getNumOperands();
9243 auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
9244 if (MI.getOperand(NumOperands - 1).isImm()) {
9245 unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
9246 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
9247 unsigned NewImm = Imm;
9249 const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
9251 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9255 } else if (Domain == 2) {
9257 } else if (Domain == 3) {
9258 if (Subtarget.hasAVX2()) {
9260 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
9261 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9265 assert(!Is256 && "128-bit vector expected");
9270 assert(table && table[Domain - 1] && "Unknown domain op");
9272 MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
9278 case X86::BLENDPDrmi:
9279 case X86::BLENDPDrri:
9280 case X86::VBLENDPDrmi:
9281 case X86::VBLENDPDrri:
9282 return SetBlendDomain(2, false);
9283 case X86::VBLENDPDYrmi:
9284 case X86::VBLENDPDYrri:
9285 return SetBlendDomain(4, true);
9286 case X86::BLENDPSrmi:
9287 case X86::BLENDPSrri:
9288 case X86::VBLENDPSrmi:
9289 case X86::VBLENDPSrri:
9290 case X86::VPBLENDDrmi:
9291 case X86::VPBLENDDrri:
9292 return SetBlendDomain(4, false);
9293 case X86::VBLENDPSYrmi:
9294 case X86::VBLENDPSYrri:
9295 case X86::VPBLENDDYrmi:
9296 case X86::VPBLENDDYrri:
9297 return SetBlendDomain(8, true);
9298 case X86::PBLENDWrmi:
9299 case X86::PBLENDWrri:
9300 case X86::VPBLENDWrmi:
9301 case X86::VPBLENDWrri:
9302 return SetBlendDomain(8, false);
9303 case X86::VPBLENDWYrmi:
9304 case X86::VPBLENDWYrri:
9305 return SetBlendDomain(16, true);
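// Note: ImmWidth is the number of lanes controlled by the blend immediate:
// 2 for the PD blends, 4 or 8 for the PS and dword blends, and 16 for
// VPBLENDWY, whose 8-bit immediate controls both 128-bit halves (hence the
// (Imm << 8) | Imm replication above).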
9306 case X86::VPANDDZ128rr:
9307 case X86::VPANDDZ128rm:
9308 case X86::VPANDDZ256rr:
9309 case X86::VPANDDZ256rm:
9310 case X86::VPANDQZ128rr:
9311 case X86::VPANDQZ128rm:
9312 case X86::VPANDQZ256rr:
9313 case X86::VPANDQZ256rm:
9314 case X86::VPANDNDZ128rr:
9315 case X86::VPANDNDZ128rm:
9316 case X86::VPANDNDZ256rr:
9317 case X86::VPANDNDZ256rm:
9318 case X86::VPANDNQZ128rr:
9319 case X86::VPANDNQZ128rm:
9320 case X86::VPANDNQZ256rr:
9321 case X86::VPANDNQZ256rm:
9322 case X86::VPORDZ128rr:
9323 case X86::VPORDZ128rm:
9324 case X86::VPORDZ256rr:
9325 case X86::VPORDZ256rm:
9326 case X86::VPORQZ128rr:
9327 case X86::VPORQZ128rm:
9328 case X86::VPORQZ256rr:
9329 case X86::VPORQZ256rm:
9330 case X86::VPXORDZ128rr:
9331 case X86::VPXORDZ128rm:
9332 case X86::VPXORDZ256rr:
9333 case X86::VPXORDZ256rm:
9334 case X86::VPXORQZ128rr:
9335 case X86::VPXORQZ128rm:
9336 case X86::VPXORQZ256rr:
9337 case X86::VPXORQZ256rm: {
9339 if (Subtarget.hasDQI())
9343 lookupAVX512(MI.getOpcode(), dom, ReplaceableCustomAVX512LogicInstrs);
9344 assert(table && "Instruction not found in table?");
9347 if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9352 case X86::UNPCKHPDrr:
9353 case X86::MOVHLPSrr:
9356 MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9357 MI.getOperand(0).getSubReg() == 0 &&
9358 MI.getOperand(1).getSubReg() == 0 && MI.getOperand(2).getSubReg() == 0) {
9360 commuteInstruction(MI, false);
9364 if (Opcode == X86::MOVHLPSrr)
9367 case X86::SHUFPDrri: {
9369 unsigned Imm = MI.getOperand(3).getImm();
9370 unsigned NewImm = 0x44;
9375 MI.getOperand(3).setImm(NewImm);
9376 MI.setDesc(get(X86::SHUFPSrri));
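// Note: 0x44 is the SHUFPS immediate that selects 32-bit elements {0,1} from
// each source, i.e. the low 64-bit half, matching a SHUFPD immediate of 0;
// the elided adjustments switch a two-element selector to the high half for
// each SHUFPD immediate bit that is set.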
9384std::pair<uint16_t, uint16_t>
9387 unsigned opcode = MI.getOpcode();
9393 return std::make_pair(domain, validDomains);
9395 if (lookup(opcode, domain, ReplaceableInstrs)) {
9397 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
9398 validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
9399 } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
9401 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
9404 if (!Subtarget.hasAVX2())
9405 return std::make_pair(0, 0);
9407 } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
9409 } else if (Subtarget.hasDQI() &&
9410 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQ)) {
9412 } else if (Subtarget.hasDQI()) {
9414 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQMasked)) {
9415 if (domain == 1 || (domain == 3 && table[3] == opcode))
9422 return std::make_pair(domain, validDomains);
9428 assert(dom && "Not an SSE instruction");
9437 "256-bit vector operations only available in AVX2");
9438 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
9441 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
9443 "Can only select PackedSingle or PackedDouble");
9446 assert(Subtarget.hasAVX2() &&
9447 "256-bit insert/extract only available in AVX2");
9448 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
9451 assert(Subtarget.hasAVX512() && "Requires AVX-512");
9452 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
9454 if (table && Domain == 3 && table[3] == MI.getOpcode())
9458 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9459 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
9462 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9466 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9467 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
9468 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9471 assert(table && "Cannot change domain");
9497 case X86::DIVSDrm_Int:
9499 case X86::DIVSDrr_Int:
9501 case X86::DIVSSrm_Int:
9503 case X86::DIVSSrr_Int:
9509 case X86::SQRTSDm_Int:
9511 case X86::SQRTSDr_Int:
9513 case X86::SQRTSSm_Int:
9515 case X86::SQRTSSr_Int:
9519 case X86::VDIVPDYrm:
9520 case X86::VDIVPDYrr:
9523 case X86::VDIVPSYrm:
9524 case X86::VDIVPSYrr:
9526 case X86::VDIVSDrm_Int:
9528 case X86::VDIVSDrr_Int:
9530 case X86::VDIVSSrm_Int:
9532 case X86::VDIVSSrr_Int:
9535 case X86::VSQRTPDYm:
9536 case X86::VSQRTPDYr:
9539 case X86::VSQRTPSYm:
9540 case X86::VSQRTPSYr:
9542 case X86::VSQRTSDm_Int:
9544 case X86::VSQRTSDr_Int:
9546 case X86::VSQRTSSm_Int:
9548 case X86::VSQRTSSr_Int:
9550 case X86::VDIVPDZ128rm:
9551 case X86::VDIVPDZ128rmb:
9552 case X86::VDIVPDZ128rmbk:
9553 case X86::VDIVPDZ128rmbkz:
9554 case X86::VDIVPDZ128rmk:
9555 case X86::VDIVPDZ128rmkz:
9556 case X86::VDIVPDZ128rr:
9557 case X86::VDIVPDZ128rrk:
9558 case X86::VDIVPDZ128rrkz:
9559 case X86::VDIVPDZ256rm:
9560 case X86::VDIVPDZ256rmb:
9561 case X86::VDIVPDZ256rmbk:
9562 case X86::VDIVPDZ256rmbkz:
9563 case X86::VDIVPDZ256rmk:
9564 case X86::VDIVPDZ256rmkz:
9565 case X86::VDIVPDZ256rr:
9566 case X86::VDIVPDZ256rrk:
9567 case X86::VDIVPDZ256rrkz:
9568 case X86::VDIVPDZrrb:
9569 case X86::VDIVPDZrrbk:
9570 case X86::VDIVPDZrrbkz:
9571 case X86::VDIVPDZrm:
9572 case X86::VDIVPDZrmb:
9573 case X86::VDIVPDZrmbk:
9574 case X86::VDIVPDZrmbkz:
9575 case X86::VDIVPDZrmk:
9576 case X86::VDIVPDZrmkz:
9577 case X86::VDIVPDZrr:
9578 case X86::VDIVPDZrrk:
9579 case X86::VDIVPDZrrkz:
9580 case X86::VDIVPSZ128rm:
9581 case X86::VDIVPSZ128rmb:
9582 case X86::VDIVPSZ128rmbk:
9583 case X86::VDIVPSZ128rmbkz:
9584 case X86::VDIVPSZ128rmk:
9585 case X86::VDIVPSZ128rmkz:
9586 case X86::VDIVPSZ128rr:
9587 case X86::VDIVPSZ128rrk:
9588 case X86::VDIVPSZ128rrkz:
9589 case X86::VDIVPSZ256rm:
9590 case X86::VDIVPSZ256rmb:
9591 case X86::VDIVPSZ256rmbk:
9592 case X86::VDIVPSZ256rmbkz:
9593 case X86::VDIVPSZ256rmk:
9594 case X86::VDIVPSZ256rmkz:
9595 case X86::VDIVPSZ256rr:
9596 case X86::VDIVPSZ256rrk:
9597 case X86::VDIVPSZ256rrkz:
9598 case X86::VDIVPSZrrb:
9599 case X86::VDIVPSZrrbk:
9600 case X86::VDIVPSZrrbkz:
9601 case X86::VDIVPSZrm:
9602 case X86::VDIVPSZrmb:
9603 case X86::VDIVPSZrmbk:
9604 case X86::VDIVPSZrmbkz:
9605 case X86::VDIVPSZrmk:
9606 case X86::VDIVPSZrmkz:
9607 case X86::VDIVPSZrr:
9608 case X86::VDIVPSZrrk:
9609 case X86::VDIVPSZrrkz:
9610 case X86::VDIVSDZrm:
9611 case X86::VDIVSDZrr:
9612 case X86::VDIVSDZrm_Int:
9613 case X86::VDIVSDZrmk_Int:
9614 case X86::VDIVSDZrmkz_Int:
9615 case X86::VDIVSDZrr_Int:
9616 case X86::VDIVSDZrrk_Int:
9617 case X86::VDIVSDZrrkz_Int:
9618 case X86::VDIVSDZrrb_Int:
9619 case X86::VDIVSDZrrbk_Int:
9620 case X86::VDIVSDZrrbkz_Int:
9621 case X86::VDIVSSZrm:
9622 case X86::VDIVSSZrr:
9623 case X86::VDIVSSZrm_Int:
9624 case X86::VDIVSSZrmk_Int:
9625 case X86::VDIVSSZrmkz_Int:
9626 case X86::VDIVSSZrr_Int:
9627 case X86::VDIVSSZrrk_Int:
9628 case X86::VDIVSSZrrkz_Int:
9629 case X86::VDIVSSZrrb_Int:
9630 case X86::VDIVSSZrrbk_Int:
9631 case X86::VDIVSSZrrbkz_Int:
9632 case X86::VSQRTPDZ128m:
9633 case X86::VSQRTPDZ128mb:
9634 case X86::VSQRTPDZ128mbk:
9635 case X86::VSQRTPDZ128mbkz:
9636 case X86::VSQRTPDZ128mk:
9637 case X86::VSQRTPDZ128mkz:
9638 case X86::VSQRTPDZ128r:
9639 case X86::VSQRTPDZ128rk:
9640 case X86::VSQRTPDZ128rkz:
9641 case X86::VSQRTPDZ256m:
9642 case X86::VSQRTPDZ256mb:
9643 case X86::VSQRTPDZ256mbk:
9644 case X86::VSQRTPDZ256mbkz:
9645 case X86::VSQRTPDZ256mk:
9646 case X86::VSQRTPDZ256mkz:
9647 case X86::VSQRTPDZ256r:
9648 case X86::VSQRTPDZ256rk:
9649 case X86::VSQRTPDZ256rkz:
9650 case X86::VSQRTPDZm:
9651 case X86::VSQRTPDZmb:
9652 case X86::VSQRTPDZmbk:
9653 case X86::VSQRTPDZmbkz:
9654 case X86::VSQRTPDZmk:
9655 case X86::VSQRTPDZmkz:
9656 case X86::VSQRTPDZr:
9657 case X86::VSQRTPDZrb:
9658 case X86::VSQRTPDZrbk:
9659 case X86::VSQRTPDZrbkz:
9660 case X86::VSQRTPDZrk:
9661 case X86::VSQRTPDZrkz:
9662 case X86::VSQRTPSZ128m:
9663 case X86::VSQRTPSZ128mb:
9664 case X86::VSQRTPSZ128mbk:
9665 case X86::VSQRTPSZ128mbkz:
9666 case X86::VSQRTPSZ128mk:
9667 case X86::VSQRTPSZ128mkz:
9668 case X86::VSQRTPSZ128r:
9669 case X86::VSQRTPSZ128rk:
9670 case X86::VSQRTPSZ128rkz:
9671 case X86::VSQRTPSZ256m:
9672 case X86::VSQRTPSZ256mb:
9673 case X86::VSQRTPSZ256mbk:
9674 case X86::VSQRTPSZ256mbkz:
9675 case X86::VSQRTPSZ256mk:
9676 case X86::VSQRTPSZ256mkz:
9677 case X86::VSQRTPSZ256r:
9678 case X86::VSQRTPSZ256rk:
9679 case X86::VSQRTPSZ256rkz:
9680 case X86::VSQRTPSZm:
9681 case X86::VSQRTPSZmb:
9682 case X86::VSQRTPSZmbk:
9683 case X86::VSQRTPSZmbkz:
9684 case X86::VSQRTPSZmk:
9685 case X86::VSQRTPSZmkz:
9686 case X86::VSQRTPSZr:
9687 case X86::VSQRTPSZrb:
9688 case X86::VSQRTPSZrbk:
9689 case X86::VSQRTPSZrbkz:
9690 case X86::VSQRTPSZrk:
9691 case X86::VSQRTPSZrkz:
9692 case X86::VSQRTSDZm:
9693 case X86::VSQRTSDZm_Int:
9694 case X86::VSQRTSDZmk_Int:
9695 case X86::VSQRTSDZmkz_Int:
9696 case X86::VSQRTSDZr:
9697 case X86::VSQRTSDZr_Int:
9698 case X86::VSQRTSDZrk_Int:
9699 case X86::VSQRTSDZrkz_Int:
9700 case X86::VSQRTSDZrb_Int:
9701 case X86::VSQRTSDZrbk_Int:
9702 case X86::VSQRTSDZrbkz_Int:
9703 case X86::VSQRTSSZm:
9704 case X86::VSQRTSSZm_Int:
9705 case X86::VSQRTSSZmk_Int:
9706 case X86::VSQRTSSZmkz_Int:
9707 case X86::VSQRTSSZr:
9708 case X86::VSQRTSSZr_Int:
9709 case X86::VSQRTSSZrk_Int:
9710 case X86::VSQRTSSZrkz_Int:
9711 case X86::VSQRTSSZrb_Int:
9712 case X86::VSQRTSSZrbk_Int:
9713 case X86::VSQRTSSZrbkz_Int:
9715 case X86::VGATHERDPDYrm:
9716 case X86::VGATHERDPDZ128rm:
9717 case X86::VGATHERDPDZ256rm:
9718 case X86::VGATHERDPDZrm:
9719 case X86::VGATHERDPDrm:
9720 case X86::VGATHERDPSYrm:
9721 case X86::VGATHERDPSZ128rm:
9722 case X86::VGATHERDPSZ256rm:
9723 case X86::VGATHERDPSZrm:
9724 case X86::VGATHERDPSrm:
9725 case X86::VGATHERPF0DPDm:
9726 case X86::VGATHERPF0DPSm:
9727 case X86::VGATHERPF0QPDm:
9728 case X86::VGATHERPF0QPSm:
9729 case X86::VGATHERPF1DPDm:
9730 case X86::VGATHERPF1DPSm:
9731 case X86::VGATHERPF1QPDm:
9732 case X86::VGATHERPF1QPSm:
9733 case X86::VGATHERQPDYrm:
9734 case X86::VGATHERQPDZ128rm:
9735 case X86::VGATHERQPDZ256rm:
9736 case X86::VGATHERQPDZrm:
9737 case X86::VGATHERQPDrm:
9738 case X86::VGATHERQPSYrm:
9739 case X86::VGATHERQPSZ128rm:
9740 case X86::VGATHERQPSZ256rm:
9741 case X86::VGATHERQPSZrm:
9742 case X86::VGATHERQPSrm:
9743 case X86::VPGATHERDDYrm:
9744 case X86::VPGATHERDDZ128rm:
9745 case X86::VPGATHERDDZ256rm:
9746 case X86::VPGATHERDDZrm:
9747 case X86::VPGATHERDDrm:
9748 case X86::VPGATHERDQYrm:
9749 case X86::VPGATHERDQZ128rm:
9750 case X86::VPGATHERDQZ256rm:
9751 case X86::VPGATHERDQZrm:
9752 case X86::VPGATHERDQrm:
9753 case X86::VPGATHERQDYrm:
9754 case X86::VPGATHERQDZ128rm:
9755 case X86::VPGATHERQDZ256rm:
9756 case X86::VPGATHERQDZrm:
9757 case X86::VPGATHERQDrm:
9758 case X86::VPGATHERQQYrm:
9759 case X86::VPGATHERQQZ128rm:
9760 case X86::VPGATHERQQZ256rm:
9761 case X86::VPGATHERQQZrm:
9762 case X86::VPGATHERQQrm:
9763 case X86::VSCATTERDPDZ128mr:
9764 case X86::VSCATTERDPDZ256mr:
9765 case X86::VSCATTERDPDZmr:
9766 case X86::VSCATTERDPSZ128mr:
9767 case X86::VSCATTERDPSZ256mr:
9768 case X86::VSCATTERDPSZmr:
9769 case X86::VSCATTERPF0DPDm:
9770 case X86::VSCATTERPF0DPSm:
9771 case X86::VSCATTERPF0QPDm:
9772 case X86::VSCATTERPF0QPSm:
9773 case X86::VSCATTERPF1DPDm:
9774 case X86::VSCATTERPF1DPSm:
9775 case X86::VSCATTERPF1QPDm:
9776 case X86::VSCATTERPF1QPSm:
9777 case X86::VSCATTERQPDZ128mr:
9778 case X86::VSCATTERQPDZ256mr:
9779 case X86::VSCATTERQPDZmr:
9780 case X86::VSCATTERQPSZ128mr:
9781 case X86::VSCATTERQPSZ256mr:
9782 case X86::VSCATTERQPSZmr:
9783 case X86::VPSCATTERDDZ128mr:
9784 case X86::VPSCATTERDDZ256mr:
9785 case X86::VPSCATTERDDZmr:
9786 case X86::VPSCATTERDQZ128mr:
9787 case X86::VPSCATTERDQZ256mr:
9788 case X86::VPSCATTERDQZmr:
9789 case X86::VPSCATTERQDZ128mr:
9790 case X86::VPSCATTERQDZ256mr:
9791 case X86::VPSCATTERQDZmr:
9792 case X86::VPSCATTERQQZ128mr:
9793 case X86::VPSCATTERQQZ256mr:
9794 case X86::VPSCATTERQQZmr:
9804 unsigned UseIdx) const {
9811 Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
9821 assert((Inst.getNumDefs() == 1 || FlagDef) && "Implicit def isn't flags?");
9822 if (FlagDef && !FlagDef->isDead())
9833 bool Invert) const {
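// Note: reassociation of the patterns below is only legal when the
// instruction's implicit EFLAGS definition is dead, since reordering the
// operations changes the intermediate value whose flags would otherwise be
// observed.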
9885 case X86::VPANDDZ128rr:
9886 case X86::VPANDDZ256rr:
9887 case X86::VPANDDZrr:
9888 case X86::VPANDQZ128rr:
9889 case X86::VPANDQZ256rr:
9890 case X86::VPANDQZrr:
9893 case X86::VPORDZ128rr:
9894 case X86::VPORDZ256rr:
9896 case X86::VPORQZ128rr:
9897 case X86::VPORQZ256rr:
9901 case X86::VPXORDZ128rr:
9902 case X86::VPXORDZ256rr:
9903 case X86::VPXORDZrr:
9904 case X86::VPXORQZ128rr:
9905 case X86::VPXORQZ256rr:
9906 case X86::VPXORQZrr:
9909 case X86::VANDPDYrr:
9910 case X86::VANDPSYrr:
9911 case X86::VANDPDZ128rr:
9912 case X86::VANDPSZ128rr:
9913 case X86::VANDPDZ256rr:
9914 case X86::VANDPSZ256rr:
9915 case X86::VANDPDZrr:
9916 case X86::VANDPSZrr:
9921 case X86::VORPDZ128rr:
9922 case X86::VORPSZ128rr:
9923 case X86::VORPDZ256rr:
9924 case X86::VORPSZ256rr:
9929 case X86::VXORPDYrr:
9930 case X86::VXORPSYrr:
9931 case X86::VXORPDZ128rr:
9932 case X86::VXORPSZ128rr:
9933 case X86::VXORPDZ256rr:
9934 case X86::VXORPSZ256rr:
9935 case X86::VXORPDZrr:
9936 case X86::VXORPSZrr:
9957 case X86::VPADDBYrr:
9958 case X86::VPADDWYrr:
9959 case X86::VPADDDYrr:
9960 case X86::VPADDQYrr:
9961 case X86::VPADDBZ128rr:
9962 case X86::VPADDWZ128rr:
9963 case X86::VPADDDZ128rr:
9964 case X86::VPADDQZ128rr:
9965 case X86::VPADDBZ256rr:
9966 case X86::VPADDWZ256rr:
9967 case X86::VPADDDZ256rr:
9968 case X86::VPADDQZ256rr:
9969 case X86::VPADDBZrr:
9970 case X86::VPADDWZrr:
9971 case X86::VPADDDZrr:
9972 case X86::VPADDQZrr:
9973 case X86::VPMULLWrr:
9974 case X86::VPMULLWYrr:
9975 case X86::VPMULLWZ128rr:
9976 case X86::VPMULLWZ256rr:
9977 case X86::VPMULLWZrr:
9978 case X86::VPMULLDrr:
9979 case X86::VPMULLDYrr:
9980 case X86::VPMULLDZ128rr:
9981 case X86::VPMULLDZ256rr:
9982 case X86::VPMULLDZrr:
9983 case X86::VPMULLQZ128rr:
9984 case X86::VPMULLQZ256rr:
9985 case X86::VPMULLQZrr:
9986 case X86::VPMAXSBrr:
9987 case X86::VPMAXSBYrr:
9988 case X86::VPMAXSBZ128rr:
9989 case X86::VPMAXSBZ256rr:
9990 case X86::VPMAXSBZrr:
9991 case X86::VPMAXSDrr:
9992 case X86::VPMAXSDYrr:
9993 case X86::VPMAXSDZ128rr:
9994 case X86::VPMAXSDZ256rr:
9995 case X86::VPMAXSDZrr:
9996 case X86::VPMAXSQZ128rr:
9997 case X86::VPMAXSQZ256rr:
9998 case X86::VPMAXSQZrr:
9999 case X86::VPMAXSWrr:
10000 case X86::VPMAXSWYrr:
10001 case X86::VPMAXSWZ128rr:
10002 case X86::VPMAXSWZ256rr:
10003 case X86::VPMAXSWZrr:
10004 case X86::VPMAXUBrr:
10005 case X86::VPMAXUBYrr:
10006 case X86::VPMAXUBZ128rr:
10007 case X86::VPMAXUBZ256rr:
10008 case X86::VPMAXUBZrr:
10009 case X86::VPMAXUDrr:
10010 case X86::VPMAXUDYrr:
10011 case X86::VPMAXUDZ128rr:
10012 case X86::VPMAXUDZ256rr:
10013 case X86::VPMAXUDZrr:
10014 case X86::VPMAXUQZ128rr:
10015 case X86::VPMAXUQZ256rr:
10016 case X86::VPMAXUQZrr:
10017 case X86::VPMAXUWrr:
10018 case X86::VPMAXUWYrr:
10019 case X86::VPMAXUWZ128rr:
10020 case X86::VPMAXUWZ256rr:
10021 case X86::VPMAXUWZrr:
10022 case X86::VPMINSBrr:
10023 case X86::VPMINSBYrr:
10024 case X86::VPMINSBZ128rr:
10025 case X86::VPMINSBZ256rr:
10026 case X86::VPMINSBZrr:
10027 case X86::VPMINSDrr:
10028 case X86::VPMINSDYrr:
10029 case X86::VPMINSDZ128rr:
10030 case X86::VPMINSDZ256rr:
10031 case X86::VPMINSDZrr:
10032 case X86::VPMINSQZ128rr:
10033 case X86::VPMINSQZ256rr:
10034 case X86::VPMINSQZrr:
10035 case X86::VPMINSWrr:
10036 case X86::VPMINSWYrr:
10037 case X86::VPMINSWZ128rr:
10038 case X86::VPMINSWZ256rr:
10039 case X86::VPMINSWZrr:
10040 case X86::VPMINUBrr:
10041 case X86::VPMINUBYrr:
10042 case X86::VPMINUBZ128rr:
10043 case X86::VPMINUBZ256rr:
10044 case X86::VPMINUBZrr:
10045 case X86::VPMINUDrr:
10046 case X86::VPMINUDYrr:
10047 case X86::VPMINUDZ128rr:
10048 case X86::VPMINUDZ256rr:
10049 case X86::VPMINUDZrr:
10050 case X86::VPMINUQZ128rr:
10051 case X86::VPMINUQZ256rr:
10052 case X86::VPMINUQZrr:
10053 case X86::VPMINUWrr:
10054 case X86::VPMINUWYrr:
10055 case X86::VPMINUWZ128rr:
10056 case X86::VPMINUWZ256rr:
10057 case X86::VPMINUWZrr:
10061 case X86::MAXCPDrr:
10062 case X86::MAXCPSrr:
10063 case X86::MAXCSDrr:
10064 case X86::MAXCSSrr:
10065 case X86::MINCPDrr:
10066 case X86::MINCPSrr:
10067 case X86::MINCSDrr:
10068 case X86::MINCSSrr:
10069 case X86::VMAXCPDrr:
10070 case X86::VMAXCPSrr:
10071 case X86::VMAXCPDYrr:
10072 case X86::VMAXCPSYrr:
10073 case X86::VMAXCPDZ128rr:
10074 case X86::VMAXCPSZ128rr:
10075 case X86::VMAXCPDZ256rr:
10076 case X86::VMAXCPSZ256rr:
10077 case X86::VMAXCPDZrr:
10078 case X86::VMAXCPSZrr:
10079 case X86::VMAXCSDrr:
10080 case X86::VMAXCSSrr:
10081 case X86::VMAXCSDZrr:
10082 case X86::VMAXCSSZrr:
10083 case X86::VMINCPDrr:
10084 case X86::VMINCPSrr:
10085 case X86::VMINCPDYrr:
10086 case X86::VMINCPSYrr:
10087 case X86::VMINCPDZ128rr:
10088 case X86::VMINCPSZ128rr:
10089 case X86::VMINCPDZ256rr:
10090 case X86::VMINCPSZ256rr:
10091 case X86::VMINCPDZrr:
10092 case X86::VMINCPSZrr:
10093 case X86::VMINCSDrr:
10094 case X86::VMINCSSrr:
10095 case X86::VMINCSDZrr:
10096 case X86::VMINCSSZrr:
10097 case X86::VMAXCPHZ128rr:
10098 case X86::VMAXCPHZ256rr:
10099 case X86::VMAXCPHZrr:
10100 case X86::VMAXCSHZrr:
10101 case X86::VMINCPHZ128rr:
10102 case X86::VMINCPHZ256rr:
10103 case X86::VMINCPHZrr:
10104 case X86::VMINCSHZrr:
10114 case X86::VADDPDrr:
10115 case X86::VADDPSrr:
10116 case X86::VADDPDYrr:
10117 case X86::VADDPSYrr:
10118 case X86::VADDPDZ128rr:
10119 case X86::VADDPSZ128rr:
10120 case X86::VADDPDZ256rr:
10121 case X86::VADDPSZ256rr:
10122 case X86::VADDPDZrr:
10123 case X86::VADDPSZrr:
10124 case X86::VADDSDrr:
10125 case X86::VADDSSrr:
10126 case X86::VADDSDZrr:
10127 case X86::VADDSSZrr:
10128 case X86::VMULPDrr:
10129 case X86::VMULPSrr:
10130 case X86::VMULPDYrr:
10131 case X86::VMULPSYrr:
10132 case X86::VMULPDZ128rr:
10133 case X86::VMULPSZ128rr:
10134 case X86::VMULPDZ256rr:
10135 case X86::VMULPSZ256rr:
10136 case X86::VMULPDZrr:
10137 case X86::VMULPSZrr:
10138 case X86::VMULSDrr:
10139 case X86::VMULSSrr:
10140 case X86::VMULSDZrr:
10141 case X86::VMULSSZrr:
10142 case X86::VADDPHZ128rr:
10143 case X86::VADDPHZ256rr:
10144 case X86::VADDPHZrr:
10145 case X86::VADDSHZrr:
10146 case X86::VMULPHZ128rr:
10147 case X86::VMULPHZ256rr:
10148 case X86::VMULPHZrr:
10149 case X86::VMULSHZrr:
10160static std::optional<ParamLoadedValue>
10163 Register DestReg = MI.getOperand(0).getReg();
10164 Register SrcReg = MI.getOperand(1).getReg();
10169 if (DestReg == DescribedReg)
10174 if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
10175 Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
10185 if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
10186 !TRI->isSuperRegister(DestReg, DescribedReg))
10187 return std::nullopt;
10189 assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
10193std::optional<ParamLoadedValue>
10200 switch (MI.getOpcode()) {
10203 case X86::LEA64_32r: {
10205 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10206 return std::nullopt;
10210 if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
10211 return std::nullopt;
10220 if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
10221 Op2.getReg() == MI.getOperand(0).getReg())
10222 return std::nullopt;
10223 else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
10224 TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
10225 (Op2.getReg() != X86::NoRegister &&
10226 TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
10227 return std::nullopt;
10229 int64_t Coef = MI.getOperand(2).getImm();
10230 int64_t Offset = MI.getOperand(4).getImm();
10233 if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) {
10235 } else if (Op1.isFI())
10238 if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
10239 Ops.push_back(dwarf::DW_OP_constu);
10240 Ops.push_back(Coef + 1);
10241 Ops.push_back(dwarf::DW_OP_mul);
10243 if (Op && Op2.getReg() != X86::NoRegister) {
10244 int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
10246 return std::nullopt;
10247 else if (dwarfReg < 32) {
10248 Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
10251 Ops.push_back(dwarf::DW_OP_bregx);
10252 Ops.push_back(dwarfReg);
10262 Ops.push_back(dwarf::DW_OP_constu);
10263 Ops.push_back(Coef);
10264 Ops.push_back(dwarf::DW_OP_mul);
10267 if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
10268 Op2.getReg() != X86::NoRegister) {
10269 Ops.push_back(dwarf::DW_OP_plus);
10281 return std::nullopt;
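// Note: describeLoadedValue rebuilds the LEA address arithmetic (base, index
// scaled by Coef, plus displacement) as a DWARF expression so call-site
// parameter values can be described; when base and index are the same
// register the two terms collapse into a single multiply by Coef + 1.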
10284 case X86::MOV64ri32:
10287 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10288 return std::nullopt;
10295 case X86::XOR32rr: {
10298 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10299 return std::nullopt;
10300 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
10302 return std::nullopt;
10304 case X86::MOVSX64rr32: {
10311 if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
10312 return std::nullopt;
10321 if (Reg == MI.getOperand(0).getReg())
10324 assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
10325 "Unhandled sub-register case for MOVSX64rr32");
10330 assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
10347 assert(!OldFlagDef1 == !OldFlagDef2 &&
10348 "Unexpected instruction type for reassociation");
10350 if (!OldFlagDef1 || !OldFlagDef2)
10354 "Must have dead EFLAGS operand in reassociable instruction");
10361 assert(NewFlagDef1 && NewFlagDef2 &&
10362 "Unexpected operand in reassociable instruction");
10372std::pair<unsigned, unsigned>
10374 return std::make_pair(TF, 0u);
10379 using namespace X86II;
10380 static const std::pair<unsigned, const char *> TargetFlags[] = {
10381 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
10382 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
10383 {MO_GOT, "x86-got"},
10384 {MO_GOTOFF, "x86-gotoff"},
10385 {MO_GOTPCREL, "x86-gotpcrel"},
10386 {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"},
10387 {MO_PLT, "x86-plt"},
10388 {MO_TLSGD, "x86-tlsgd"},
10389 {MO_TLSLD, "x86-tlsld"},
10390 {MO_TLSLDM, "x86-tlsldm"},
10391 {MO_GOTTPOFF, "x86-gottpoff"},
10392 {MO_INDNTPOFF, "x86-indntpoff"},
10393 {MO_TPOFF, "x86-tpoff"},
10394 {MO_DTPOFF, "x86-dtpoff"},
10395 {MO_NTPOFF, "x86-ntpoff"},
10396 {MO_GOTNTPOFF, "x86-gotntpoff"},
10397 {MO_DLLIMPORT, "x86-dllimport"},
10398 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
10399 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
10400 {MO_TLVP, "x86-tlvp"},
10401 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
10402 {MO_SECREL, "x86-secrel"},
10403 {MO_COFFSTUB, "x86-coffstub"}};
10437std::optional<std::unique_ptr<outliner::OutlinedFunction>>
10440 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
10441 unsigned MinRepeats) const {
10442 unsigned SequenceSize = 0;
10443 for (auto &MI : RepeatedSequenceLocs[0]) {
10447 if (MI.isDebugInstr() || MI.isKill())
10454 unsigned CFICount = 0;
10455 for (auto &I : RepeatedSequenceLocs[0]) {
10456 if (I.isCFIInstruction())
10466 std::vector<MCCFIInstruction> CFIInstructions =
10467 C.getMF()->getFrameInstructions();
10469 if (CFICount > 0 && CFICount != CFIInstructions.size())
10470 return std::nullopt;
10474 if (RepeatedSequenceLocs[0].back().isTerminator()) {
10478 return std::make_unique<outliner::OutlinedFunction>(
10479 RepeatedSequenceLocs, SequenceSize,
10486 return std::nullopt;
10491 return std::make_unique<outliner::OutlinedFunction>(
10501 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
10510 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
10520 unsigned Flags) const {
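// Note: a candidate is rejected unless it contains either all of the
// function's CFI instructions or none of them, and a sequence that ends in a
// terminator is outlined as a tail call, avoiding the extra call/return
// overhead.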
10524 if (MI.isTerminator())
10538 if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
10539 MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
10540 MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
10544 if (MI.readsRegister(X86::RIP, &RI) ||
10545 MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
10546 MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
10550 if (MI.isCFIInstruction())
10566 MBB.insert(MBB.end(), retq);
10576 .addGlobalAddress(M.getNamedValue(MF.getName())));
10580 .addGlobalAddress(M.getNamedValue(MF.getName())));
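// Note: instructions that touch RSP or RIP, explicitly or implicitly, cannot
// be outlined: moving them into a separate function changes the stack layout
// at the call site and makes RIP-relative addresses resolve differently.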
10589 bool AllowSideEffects) const {
10594 if (ST.hasMMX() && X86::VR64RegClass.contains(Reg))
10598 if (TRI.isGeneralPurposeRegister(MF, Reg)) {
10603 if (!AllowSideEffects)
10610 } else if (X86::VR128RegClass.contains(Reg)) {
10616 } else if (X86::VR256RegClass.contains(Reg)) {
10622 } else if (X86::VR512RegClass.contains(Reg)) {
10624 if (!ST.hasAVX512())
10628 } else if (X86::VK1RegClass.contains(Reg) || X86::VK2RegClass.contains(Reg) ||
10630 X86::VK16RegClass.contains(Reg)) {
10634 unsigned Op = ST.hasBWI() ? X86::KSET0Q : X86::KSET0W;
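// Note: this register-clearing helper appears to avoid the usual XOR zeroing
// idiom for general-purpose registers when AllowSideEffects is false, since
// XOR clobbers EFLAGS; AVX-512 mask registers are cleared with KSET0, whose
// width is picked from BWI availability.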
10641 bool DoRegPressureReduce) const {
10644 case X86::VPDPWSSDrr:
10645 case X86::VPDPWSSDrm:
10646 case X86::VPDPWSSDYrr:
10647 case X86::VPDPWSSDYrm: {
10648 if (!Subtarget.hasFastDPWSSD()) {
10654 case X86::VPDPWSSDZ128rr:
10655 case X86::VPDPWSSDZ128rm:
10656 case X86::VPDPWSSDZ256rr:
10657 case X86::VPDPWSSDZ256rm:
10658 case X86::VPDPWSSDZrr:
10659 case X86::VPDPWSSDZrm: {
10660 if (Subtarget.hasBWI() && !Subtarget.hasFastDPWSSD()) {
10668 Patterns, DoRegPressureReduce);
10680 unsigned AddOpc = 0;
10681 unsigned MaddOpc = 0;
10684 assert(false && "It should not reach here");
10690 case X86::VPDPWSSDrr:
10691 MaddOpc = X86::VPMADDWDrr;
10692 AddOpc = X86::VPADDDrr;
10694 case X86::VPDPWSSDrm:
10695 MaddOpc = X86::VPMADDWDrm;
10696 AddOpc = X86::VPADDDrr;
10698 case X86::VPDPWSSDZ128rr:
10699 MaddOpc = X86::VPMADDWDZ128rr;
10700 AddOpc = X86::VPADDDZ128rr;
10702 case X86::VPDPWSSDZ128rm:
10703 MaddOpc = X86::VPMADDWDZ128rm;
10704 AddOpc = X86::VPADDDZ128rr;
10710 case X86::VPDPWSSDYrr:
10711 MaddOpc = X86::VPMADDWDYrr;
10712 AddOpc = X86::VPADDDYrr;
10714 case X86::VPDPWSSDYrm:
10715 MaddOpc = X86::VPMADDWDYrm;
10716 AddOpc = X86::VPADDDYrr;
10718 case X86::VPDPWSSDZ256rr:
10719 MaddOpc = X86::VPMADDWDZ256rr;
10720 AddOpc = X86::VPADDDZ256rr;
10722 case X86::VPDPWSSDZ256rm:
10723 MaddOpc = X86::VPMADDWDZ256rm;
10724 AddOpc = X86::VPADDDZ256rr;
10730 case X86::VPDPWSSDZrr:
10731 MaddOpc = X86::VPMADDWDZrr;
10732 AddOpc = X86::VPADDDZrr;
10734 case X86::VPDPWSSDZrm:
10735 MaddOpc = X86::VPMADDWDZrm;
10736 AddOpc = X86::VPADDDZrr;
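// Note: on subtargets without fast VPDPWSSD, the fused dot-product-accumulate
// is expanded into VPMADDWD (the word-pair multiply-add) followed by a
// separate VPADDD into the accumulator, which gives the machine combiner more
// scheduling freedom.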
10748 InstrIdxForVirtReg.insert(std::make_pair(NewReg, 0));
10770 DelInstrs, InstrIdxForVirtReg);
10774 InstrIdxForVirtReg);
10784 M.Base.FrameIndex = FI;
10785 M.getFullAddress(Ops);
10788#define GET_INSTRINFO_HELPERS
10789#include "X86GenInstrInfo.inc"
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool isFrameStoreOpcode(int Opcode)
static bool isFrameLoadOpcode(int Opcode)
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static SDValue isNOT(SDValue V, SelectionDAG &DAG)
static bool Expand2AddrUndef(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
Expand a single-def pseudo instruction to a two-addr instruction with two undef reads of the register...
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Provides some synthesis utilities to produce sequences of values.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
#define FROM_TO(FROM, TO)
cl::opt< bool > X86EnableAPXForRelocation
static bool is64Bit(const char *name)
#define GET_EGPR_IF_ENABLED(OPC)
static bool isLEA(unsigned Opcode)
static void addOperands(MachineInstrBuilder &MIB, ArrayRef< MachineOperand > MOs, int PtrOffset=0)
static std::optional< ParamLoadedValue > describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, const TargetRegisterInfo *TRI)
If DescribedReg overlaps with the MOVrr instruction's destination register then, if possible,...
static cl::opt< unsigned > PartialRegUpdateClearance("partial-reg-update-clearance", cl::desc("Clearance between two register writes " "for inserting XOR to avoid partial " "register update"), cl::init(64), cl::Hidden)
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, MachineInstr &MI)
static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg, const X86Subtarget &Subtarget)
static bool isConvertibleLEA(MachineInstr *MI)
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, const X86Subtarget &Subtarget)
static bool isAMXOpcode(unsigned Opc)
static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI, Register Reg)
static void updateOperandRegConstraints(MachineFunction &MF, MachineInstr &NewMI, const TargetInstrInfo &TII)
static int getJumpTableIndexFromAddr(const MachineInstr &MI)
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, unsigned NewWidth, unsigned *pNewMask=nullptr)
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, bool MinusOne)
static unsigned getNewOpcFromTable(ArrayRef< X86TableEntry > Table, unsigned Opc)
static unsigned getStoreRegOpcode(Register SrcReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
#define FOLD_BROADCAST(SIZE)
static cl::opt< unsigned > UndefRegClearance("undef-reg-clearance", cl::desc("How many idle instructions we would like before " "certain undef register reads"), cl::init(128), cl::Hidden)
#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64)
static bool isTruncatedShiftCountForLEA(unsigned ShAmt)
Check whether the given shift count is appropriate can be represented by a LEA instruction.
static cl::opt< bool > ReMatPICStubLoad("remat-pic-stub-load", cl::desc("Re-materialize load from stub in PIC mode"), cl::init(false), cl::Hidden)
static SmallVector< MachineMemOperand *, 2 > extractLoadMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static MachineInstr * fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII)
static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx)
static bool canConvert2Copy(unsigned Opc)
static cl::opt< bool > NoFusing("disable-spill-fusing", cl::desc("Disable fusing of spill code into instructions"), cl::Hidden)
static bool expandNOVLXStore(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &StoreDesc, const MCInstrDesc &ExtractDesc, unsigned SubIdx)
static bool isX87Reg(Register Reg)
Return true if the Reg is X87 register.
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, Register Reg)
Expand a single-def pseudo instruction to a two-addr instruction with two k0 reads.
#define VPERM_CASES_BROADCAST(Suffix)
static std::pair< X86::CondCode, unsigned > isUseDefConvertible(const MachineInstr &MI)
Check whether the use can be converted to remove a comparison against zero.
static bool findRedundantFlagInstr(MachineInstr &CmpInstr, MachineInstr &CmpValDefInstr, const MachineRegisterInfo *MRI, MachineInstr **AndInstr, const TargetRegisterInfo *TRI, const X86Subtarget &ST, bool &NoSignFlag, bool &ClearsOverflowFlag)
static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
static unsigned getLoadRegOpcode(Register DestReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
static void expandLoadStackGuard(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum, bool ForLoadFold=false)
static MachineInstr * makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI)
#define GET_ND_IF_ENABLED(OPC)
static bool expandMOVSHP(MachineInstrBuilder &MIB, MachineInstr &MI, const TargetInstrInfo &TII, bool HasAVX)
static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget, bool ForLoadFold=false)
Return true for all instructions that only update the first 32 or 64-bits of the destination register...
static const uint16_t * lookupAVX512(unsigned opcode, unsigned domain, ArrayRef< uint16_t[4]> Table)
static unsigned getLoadStoreRegOpcode(Register Reg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI, bool Load)
#define VPERM_CASES(Suffix)
#define FROM_TO_SIZE(A, B, S)
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag, bool &ClearsOverflowFlag)
Check whether the definition can be converted to remove a comparison against zero.
static MachineInstr * fuseInst(MachineFunction &MF, unsigned Opcode, unsigned OpNo, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII, int PtrOffset=0)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode)
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static MachineBasicBlock * getFallThroughMBB(MachineBasicBlock *MBB, MachineBasicBlock *TBB)
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, const MachineInstr &UserMI, const MachineFunction &MF)
Check if LoadMI is a partial register load that we can't fold into MI because the latter uses content...
static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI)
static bool isHReg(Register Reg)
Test if the given register is a physical h register.
static cl::opt< bool > PrintFailedFusing("print-failed-fuse-candidates", cl::desc("Print instructions that the allocator wants to" " fuse, but the X86 backend currently can't"), cl::Hidden)
static bool expandNOVLXLoad(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &LoadDesc, const MCInstrDesc &BroadcastDesc, unsigned SubIdx)
static void genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
This determines which of three possible cases of a three source commute the source indexes correspond...
static unsigned getTruncatedShiftCount(const MachineInstr &MI, unsigned ShiftAmtOperandIdx)
Check whether the shift count for a machine operand is non-zero.
static SmallVector< MachineMemOperand *, 2 > extractStoreMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static unsigned getBroadcastOpcode(const X86FoldTableEntry *I, const TargetRegisterClass *RC, const X86Subtarget &STI)
static unsigned convertALUrr2ALUri(unsigned Opc)
Convert an ALUrr opcode to corresponding ALUri opcode.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI)
Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
static bool isCommutableVPERMV3Instruction(unsigned Opcode)
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
LiveInterval - This class represents the liveness of a register, or stack slot.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
A set of physical registers with utility functions to track liveness when walking backward/forward th...
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
static LocationSize precise(uint64_t Value)
bool usesWindowsCFI() const
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
Instances of this class represent a single low-level machine instruction.
void setOpcode(unsigned Op)
Describe properties that are true of each instruction in the target description file.
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
MachineInstrBundleIterator< const MachineInstr > const_iterator
void push_back(MachineInstr *MI)
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
LLVM_ABI void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
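These builder methods are normally reached through BuildMI chaining rather than called on a stored object. A minimal sketch, assuming MBB, InsertPt, DL and TII are already in scope, and using MOV32ri/EAX purely as placeholder opcode and register:

// Build `mov eax, 42` in front of InsertPt; each add* call appends one operand.
MachineInstrBuilder MIB =
    BuildMI(MBB, InsertPt, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(42);
MachineInstr *NewMI = MIB.getInstr(); // explicit access when the implicit conversions are not enough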
Representation of each machine instruction.
mop_iterator operands_begin()
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
void dropDebugNumber()
Drop any variable location debugging information associated with this instruction.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void dump() const
const MachineOperand & getOperand(unsigned i) const
unsigned getNumDefs() const
Returns the total number of definitions.
void setDebugLoc(DebugLoc DL)
Replace the current source location information with the given one.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
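A sketch of how these flags are used when allocating a memory operand for a stack reload; MF, MFI, FrameIndex and NewMI are assumed to be in scope, and this only mirrors the spill/reload paths rather than reproducing them:

MachineMemOperand *MMO = MF.getMachineMemOperand(
    MachinePointerInfo::getFixedStack(MF, FrameIndex), MachineMemOperand::MOLoad,
    MFI.getObjectSize(FrameIndex), MFI.getObjectAlign(FrameIndex));
NewMI->addMemOperand(MF, MMO); // attach the reload's memory reference to the new instruction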
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateCPI(unsigned Idx, int Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
static MachineOperand CreateFI(int Idx)
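A short sketch of the usual pattern: test the operand kind before reading it, and use the Create* factories to synthesize stand-alone operands (MI and FrameIndex are assumed to be in scope):

const MachineOperand &MO = MI.getOperand(0);
int64_t Imm = MO.isImm() ? MO.getImm() : 0; // guard on the kind before reading
// Factory functions build detached operands, e.g. for MachineInstr::addOperand.
MachineOperand ImmOp = MachineOperand::CreateImm(Imm);
MachineOperand FIOp = MachineOperand::CreateFI(FrameIndex);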
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
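A sketch of the typical virtual-register flow; MRI is the function's MachineRegisterInfo and the GR32 classes are only illustrative choices:

Register NewVR = MRI.createVirtualRegister(&X86::GR32RegClass);
// Later, tighten the class when an instruction imposes a stricter constraint.
if (!MRI.constrainRegClass(NewVR, &X86::GR32_NOSPRegClass))
  report_fatal_error("unable to constrain register class");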
A Module instance is used to store all the information related to an LLVM module.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
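These predicates partition the register number space; a hedged sketch of how code typically branches on them (MI, MRI and TRI assumed in scope):

Register Reg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = nullptr;
if (Reg.isVirtual())
  RC = MRI.getRegClass(Reg);             // virtual: the class is recorded in MRI
else if (Reg.isPhysical())
  RC = TRI->getMinimalPhysRegClass(Reg); // physical: derive a class from TRI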
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
MachineFunction & getMachineFunction() const
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getBaseIndex() const
Returns the base index associated with this index.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getFP128Ty(LLVMContext &C)
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
SlotIndex def
The index of the defining instruction.
LLVM Value Representation.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const override
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
Check if there exists an earlier instruction that operates on the same source operands and sets eflag...
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Overrides the isSchedulingBoundary from Codegen/TargetInstrInfo.cpp to make it capable of identifying...
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Returns true iff the routine could find two commutable operands in the given machine instruction.
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
X86InstrInfo(const X86Subtarget &STI)
static bool isDataInvariantLoad(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value l...
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned CommuteOpIdx1, unsigned CommuteOpIdx2) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
const X86RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override
Returns true if we have preference on the operands order in MI, the commute decision is returned in C...
bool hasLiveCondCodeDef(MachineInstr &MI) const
True if MI has a condition code def, e.g.
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
convertToThreeAddress - This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_AD...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool expandPostRAPseudo(MachineInstr &MI) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
MCInst getNop() const override
Return the noop instruction to use for a noop.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
This is used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Fold a load or store of the specified stack slot into the specified machine instruction for the speci...
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isStoreToStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const override
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
bool isUnconditionalTailCall(const MachineInstr &MI) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, unsigned LEAOpcode, bool AllowSP, Register &NewSrc, unsigned &NewSrcSubReg, bool &isKill, MachineOperand &ImplicitOp, LiveVariables *LV, LiveIntervals *LIS) const
Given an operand within a MachineInstr, insert preceding code to put it into the right format for a p...
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isLoadFromStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool setExecutionDomainCustom(MachineInstr &MI, unsigned Domain) const
int getSPAdjust(const MachineInstr &MI) const override
getSPAdjust - This returns the stack pointer adjustment made by this instruction.
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isReMaterializableImpl(const MachineInstr &MI) const override
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the global base register value.
int getJumpTableIndex(const MachineInstr &MI) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const override
This is an architecture-specific helper function of reassociateOps.
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
isCoalescableExtInstr - Return true if the instruction is a "coalescable" extension instruction.
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Opc, Register Reg, int FrameIdx, bool isKill=false) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds potential patterns, this function generates the instructions ...
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
bool analyzeBranchPredicate(MachineBasicBlock &MBB, TargetInstrInfo::MachineBranchPredicate &MBP, bool AllowModify=false) const override
static bool isDataInvariant(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value o...
unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before certain undef register...
void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
int64_t getFrameAdjustment(const MachineInstr &I) const
Returns the stack pointer adjustment that happens inside the frame setup..destroy sequence (e....
bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override
uint16_t getExecutionDomainCustom(const MachineInstr &MI) const
bool isHighLatencyDef(int opc) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getFMA3OpcodeToCommuteOperands(const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2, const X86InstrFMA3Group &FMA3Group) const
Returns an adjusted FMA opcode that must be used in FMA instruction that performs the same computatio...
bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before a partial register upd...
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
Register getGlobalBaseReg() const
int getTCReturnAddrDelta() const
void setGlobalBaseReg(Register Reg)
bool getUsesRedZone() const
const TargetRegisterClass * constrainRegClassToNonRex2(const TargetRegisterClass *RC) const
const X86RegisterInfo * getRegisterInfo() const override
const X86FrameLowering * getFrameLowering() const override
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
@ X86
Windows x64, Windows Itanium (IA-64)
X86II - This namespace holds all of the target specific flags that instruction info tracks.
bool isKMergeMasked(uint64_t TSFlags)
bool hasNewDataDest(uint64_t TSFlags)
@ MO_GOT_ABSOLUTE_ADDRESS
MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a relocation of: SYMBOL_LABEL + [.
@ MO_INDNTPOFF
MO_INDNTPOFF - On a symbol operand this indicates that the immediate is the absolute address of the G...
@ MO_GOTNTPOFF
MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry w...
@ MO_GOTTPOFF
MO_GOTTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry wi...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ SSEDomainShift
Execution domain for SSE instructions.
bool canUseApxExtendedReg(const MCInstrDesc &Desc)
bool isPseudo(uint64_t TSFlags)
bool isKMasked(uint64_t TSFlags)
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Define some predicates that are used for node matching.
CondCode getCondFromBranch(const MachineInstr &MI)
CondCode getCondFromCFCMov(const MachineInstr &MI)
CondCode getCondFromMI(const MachineInstr &MI)
Return the condition code of the instruction.
int getFirstAddrOperandIdx(const MachineInstr &MI)
Return the index of the instruction's first address operand, if it has a memory reference,...
unsigned getSwappedVCMPImm(unsigned Imm)
Get the VCMP immediate if the opcodes are swapped.
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g.
unsigned getSwappedVPCOMImm(unsigned Imm)
Get the VPCOM immediate if the opcodes are swapped.
bool isX87Instruction(MachineInstr &MI)
Check if the instruction is an X87 instruction.
unsigned getNonNDVariant(unsigned Opc)
unsigned getVPCMPImmForCond(ISD::CondCode CC)
Get the VPCMP immediate for the given condition.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
CondCode getCondFromSETCC(const MachineInstr &MI)
unsigned getSwappedVPCMPImm(unsigned Imm)
Get the VPCMP immediate if the opcodes are swapped.
CondCode getCondFromCCMP(const MachineInstr &MI)
int getCCMPCondFlagsFromCondCode(CondCode CC)
int getCondSrcNoFromDesc(const MCInstrDesc &MCID)
Return the source operand # for condition code by MCID.
const Constant * getConstantFromPool(const MachineInstr &MI, unsigned OpNo)
Find any constant pool entry associated with a specific instruction operand.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand=false, bool HasNDD=false)
Return a cmov opcode for the given register size in bytes, and operand type.
unsigned getNFVariant(unsigned Opc)
unsigned getVectorRegisterWidth(const MCOperandInfo &Info)
Get the width of the vector register operand.
CondCode getCondFromCMov(const MachineInstr &MI)
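A sketch of how these helpers compose; MI is assumed to be a branch, and the predicate is just an example value:

X86::CondCode CC = X86::getCondFromBranch(MI); // COND_INVALID if MI is not a JCC
if (CC != X86::COND_INVALID)
  CC = X86::GetOppositeBranchCondition(CC);    // e.g. COND_E -> COND_NE
// Map an IR predicate onto an x86 condition code; the bool reports whether the operands must swap.
auto [PredCC, NeedSwap] = X86::getX86ConditionCode(CmpInst::ICMP_SLT);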
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static bool isAddMemInstrWithRelocation(const MachineInstr &MI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
static bool isMem(const MachineInstr &MI, unsigned Op)
constexpr RegState getKillRegState(bool B)
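The RegState flags are what addReg expects, usually produced through the get*RegState helpers; a minimal sketch assuming MBB, MI, DL, TII, SrcReg and isKill are in scope:

// Copy SrcReg into EAX, marking the source killed when this is its last use.
BuildMI(MBB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
    .addReg(SrcReg, getKillRegState(isKill));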
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
static const MachineInstrBuilder & addRegReg(const MachineInstrBuilder &MIB, Register Reg1, bool isKill1, unsigned SubReg1, Register Reg2, bool isKill2, unsigned SubReg2)
addRegReg - This function is used to add a memory reference of the form: [Reg + Reg].
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
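addFrameReference appends the five x86 memory operands (base, scale, index, displacement, segment) rooted at a frame index; a sketch of a reload under the same assumptions as the sketches above:

// Load a 32-bit GPR back from its stack slot.
addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV32rm), DestReg), FrameIndex);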
constexpr RegState getDeadRegState(bool B)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
const X86FoldTableEntry * lookupBroadcastFoldTable(unsigned RegOp, unsigned OpNum)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
const X86InstrFMA3Group * getFMA3Group(unsigned Opcode, uint64_t TSFlags)
Returns a reference to a group of FMA3 opcodes to where the given Opcode is included.
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const X86FoldTableEntry * lookupTwoAddrFoldTable(unsigned RegOp)
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
constexpr RegState getDefRegState(bool B)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
RegState getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
static bool isMemInstrWithGOTPCREL(const MachineInstr &MI)
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
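A worked example of the rounding:

assert(alignTo(10, Align(8)) == 16); // 10 rounds up to the next multiple of 8
assert(alignTo(16, Align(8)) == 16); // already aligned values are unchanged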
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
const X86FoldTableEntry * lookupUnfoldTable(unsigned MemOp)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool matchBroadcastSize(const X86FoldTableEntry &Entry, unsigned BroadcastBits)
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
const X86FoldTableEntry * lookupFoldTable(unsigned RegOp, unsigned OpNum)
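These table lookups back the memory-folding hooks; a hedged sketch of the query pattern, assuming it runs inside a function that returns a MachineInstr* (as foldMemoryOperandImpl does) and that OpNum identifies the operand being folded:

const X86FoldTableEntry *Entry = lookupFoldTable(MI.getOpcode(), OpNum);
if (!Entry)
  return nullptr; // no memory form exists for this operand; keep the register form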
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, Register Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
constexpr RegState getUndefRegState(bool B)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
std::vector< MachineInstr * > Kills
Kills - List of MachineInstruction's which are the last use of this virtual register (kill it) in the...
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
X86AddressMode - This struct holds a generalized full x86 address mode.
enum llvm::X86AddressMode::(anonymous enum) BaseType
This class is used to group {132, 213, 231} forms of FMA opcodes together.
unsigned get213Opcode() const
Returns the 213 form of FMA opcode.
unsigned get231Opcode() const
Returns the 231 form of FMA opcode.
bool isIntrinsic() const
Returns true iff the group of FMA opcodes holds intrinsic opcodes.
unsigned get132Opcode() const
Returns the 132 form of FMA opcode.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.