#define CASE_SSE_INS_COMMON(Inst, src)            \
  case X86::Inst##src:

#define CASE_AVX_INS_COMMON(Inst, Suffix, src)    \
  case X86::V##Inst##Suffix##src:

#define CASE_MASK_INS_COMMON(Inst, Suffix, src)   \
  case X86::V##Inst##Suffix##src##k:

#define CASE_MASKZ_INS_COMMON(Inst, Suffix, src)  \
  case X86::V##Inst##Suffix##src##kz:

#define CASE_AVX512_INS_COMMON(Inst, Suffix, src) \
  CASE_AVX_INS_COMMON(Inst, Suffix, src)          \
  CASE_MASK_INS_COMMON(Inst, Suffix, src)         \
  CASE_MASKZ_INS_COMMON(Inst, Suffix, src)
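// These base helpers expand one mnemonic into switch-case labels for the SSE
// encoding, the VEX (AVX) encoding, and the EVEX (AVX-512) encodings, where
// the ##k and ##kz suffixes name the merge-masked and zero-masked opcode
// variants. The per-family macros below combine them to cover the 128-, 256-
// and 512-bit register forms of each instruction group.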
#define CASE_MOVDUP(Inst, src)                    \
  CASE_AVX512_INS_COMMON(Inst, Z, r##src)         \
  CASE_AVX512_INS_COMMON(Inst, Z256, r##src)      \
  CASE_AVX512_INS_COMMON(Inst, Z128, r##src)      \
  CASE_AVX_INS_COMMON(Inst, , r##src)             \
  CASE_AVX_INS_COMMON(Inst, Y, r##src)            \
  CASE_SSE_INS_COMMON(Inst, r##src)

#define CASE_MASK_MOVDUP(Inst, src)               \
  CASE_MASK_INS_COMMON(Inst, Z, r##src)           \
  CASE_MASK_INS_COMMON(Inst, Z256, r##src)        \
  CASE_MASK_INS_COMMON(Inst, Z128, r##src)

#define CASE_MASKZ_MOVDUP(Inst, src)              \
  CASE_MASKZ_INS_COMMON(Inst, Z, r##src)          \
  CASE_MASKZ_INS_COMMON(Inst, Z256, r##src)       \
  CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
#define CASE_PMOVZX(Inst, src)                    \
  CASE_AVX512_INS_COMMON(Inst, Z, r##src)         \
  CASE_AVX512_INS_COMMON(Inst, Z256, r##src)      \
  CASE_AVX512_INS_COMMON(Inst, Z128, r##src)      \
  CASE_AVX_INS_COMMON(Inst, , r##src)             \
  CASE_AVX_INS_COMMON(Inst, Y, r##src)            \
  CASE_SSE_INS_COMMON(Inst, r##src)

#define CASE_MASK_PMOVZX(Inst, src)               \
  CASE_MASK_INS_COMMON(Inst, Z, r##src)           \
  CASE_MASK_INS_COMMON(Inst, Z256, r##src)        \
  CASE_MASK_INS_COMMON(Inst, Z128, r##src)

#define CASE_MASKZ_PMOVZX(Inst, src)              \
  CASE_MASKZ_INS_COMMON(Inst, Z, r##src)          \
  CASE_MASKZ_INS_COMMON(Inst, Z256, r##src)       \
  CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
#define CASE_UNPCK(Inst, src)                     \
  CASE_AVX512_INS_COMMON(Inst, Z, r##src)         \
  CASE_AVX512_INS_COMMON(Inst, Z256, r##src)      \
  CASE_AVX512_INS_COMMON(Inst, Z128, r##src)      \
  CASE_AVX_INS_COMMON(Inst, , r##src)             \
  CASE_AVX_INS_COMMON(Inst, Y, r##src)            \
  CASE_SSE_INS_COMMON(Inst, r##src)

#define CASE_MASK_UNPCK(Inst, src)                \
  CASE_MASK_INS_COMMON(Inst, Z, r##src)           \
  CASE_MASK_INS_COMMON(Inst, Z256, r##src)        \
  CASE_MASK_INS_COMMON(Inst, Z128, r##src)

#define CASE_MASKZ_UNPCK(Inst, src)               \
  CASE_MASKZ_INS_COMMON(Inst, Z, r##src)          \
  CASE_MASKZ_INS_COMMON(Inst, Z256, r##src)       \
  CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
#define CASE_SHUF(Inst, suf)                      \
  CASE_AVX512_INS_COMMON(Inst, Z, suf)            \
  CASE_AVX512_INS_COMMON(Inst, Z256, suf)         \
  CASE_AVX512_INS_COMMON(Inst, Z128, suf)         \
  CASE_AVX_INS_COMMON(Inst, , suf)                \
  CASE_AVX_INS_COMMON(Inst, Y, suf)               \
  CASE_SSE_INS_COMMON(Inst, suf)

#define CASE_MASK_SHUF(Inst, src)                 \
  CASE_MASK_INS_COMMON(Inst, Z, r##src##i)        \
  CASE_MASK_INS_COMMON(Inst, Z256, r##src##i)     \
  CASE_MASK_INS_COMMON(Inst, Z128, r##src##i)

#define CASE_MASKZ_SHUF(Inst, src)                \
  CASE_MASKZ_INS_COMMON(Inst, Z, r##src##i)       \
  CASE_MASKZ_INS_COMMON(Inst, Z256, r##src##i)    \
  CASE_MASKZ_INS_COMMON(Inst, Z128, r##src##i)
#define CASE_VPERMILPI(Inst, src)                 \
  CASE_AVX512_INS_COMMON(Inst, Z, src##i)         \
  CASE_AVX512_INS_COMMON(Inst, Z256, src##i)      \
  CASE_AVX512_INS_COMMON(Inst, Z128, src##i)      \
  CASE_AVX_INS_COMMON(Inst, , src##i)             \
  CASE_AVX_INS_COMMON(Inst, Y, src##i)

#define CASE_MASK_VPERMILPI(Inst, src)            \
  CASE_MASK_INS_COMMON(Inst, Z, src##i)           \
  CASE_MASK_INS_COMMON(Inst, Z256, src##i)        \
  CASE_MASK_INS_COMMON(Inst, Z128, src##i)

#define CASE_MASKZ_VPERMILPI(Inst, src)           \
  CASE_MASKZ_INS_COMMON(Inst, Z, src##i)          \
  CASE_MASKZ_INS_COMMON(Inst, Z256, src##i)       \
  CASE_MASKZ_INS_COMMON(Inst, Z128, src##i)
#define CASE_VPERM(Inst, src)                     \
  CASE_AVX512_INS_COMMON(Inst, Z, src##i)         \
  CASE_AVX512_INS_COMMON(Inst, Z256, src##i)      \
  CASE_AVX_INS_COMMON(Inst, Y, src##i)

#define CASE_MASK_VPERM(Inst, src)                \
  CASE_MASK_INS_COMMON(Inst, Z, src##i)           \
  CASE_MASK_INS_COMMON(Inst, Z256, src##i)

#define CASE_MASKZ_VPERM(Inst, src)               \
  CASE_MASKZ_INS_COMMON(Inst, Z, src##i)          \
  CASE_MASKZ_INS_COMMON(Inst, Z256, src##i)
#define CASE_VSHUF(Inst, src)                          \
  CASE_AVX512_INS_COMMON(SHUFF##Inst, Z, r##src##i)    \
  CASE_AVX512_INS_COMMON(SHUFI##Inst, Z, r##src##i)    \
  CASE_AVX512_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
  CASE_AVX512_INS_COMMON(SHUFI##Inst, Z256, r##src##i)

#define CASE_MASK_VSHUF(Inst, src)                     \
  CASE_MASK_INS_COMMON(SHUFF##Inst, Z, r##src##i)      \
  CASE_MASK_INS_COMMON(SHUFI##Inst, Z, r##src##i)      \
  CASE_MASK_INS_COMMON(SHUFF##Inst, Z256, r##src##i)   \
  CASE_MASK_INS_COMMON(SHUFI##Inst, Z256, r##src##i)

#define CASE_MASKZ_VSHUF(Inst, src)                    \
  CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z, r##src##i)     \
  CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z, r##src##i)     \
  CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z256, r##src##i)  \
  CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
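// For illustration only (a sketch, not part of the original file): one family
// macro fans out into every register form of the instruction, e.g.
//
//   CASE_MOVDUP(MOVSLDUP, r)
//     => case X86::VMOVSLDUPZrr:    case X86::VMOVSLDUPZrrk:    case X86::VMOVSLDUPZrrkz:
//        case X86::VMOVSLDUPZ256rr: case X86::VMOVSLDUPZ256rrk: case X86::VMOVSLDUPZ256rrkz:
//        case X86::VMOVSLDUPZ128rr: case X86::VMOVSLDUPZ128rrk: case X86::VMOVSLDUPZ128rrkz:
//        case X86::VMOVSLDUPrr:     case X86::VMOVSLDUPYrr:     case X86::MOVSLDUPrr: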
  if (X86::ZMM0 <= RegNo && RegNo <= X86::ZMM31)
    return 512;
  if (X86::YMM0 <= RegNo && RegNo <= X86::YMM31)
    return 256;
  if (X86::XMM0 <= RegNo && RegNo <= X86::XMM31)
    return 128;
  if (X86::MM0 <= RegNo && RegNo <= X86::MM7)
    return 64;
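// The register widths resolved above (512/256/128/64 bits) are divided by the
// scalar element size (via MVT::getVectorVT and getSizeInBits, listed in the
// references at the end of this listing) to pick the vector type handed to
// the shuffle-mask decoders.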
static std::string getMaskName(const MCInst *MI, const char *DestName,
                               const char *(*getRegName)(unsigned)) {
  std::string OpMaskName(DestName);
  bool MaskWithZero = false;
  const char *MaskRegName = nullptr;

  OpMaskName += MaskRegName;

  OpMaskName += " {z}";
bool EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
                            const char *(*getRegName)(unsigned)) {
  const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr;
  bool RegForm = false;
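  // Each case cluster below records the source/destination register names and
  // calls the matching Decode*Mask helper (declared in the reference list at
  // the end of this listing) to fill ShuffleMask. The first clusters handle
  // the BLENDPD/BLENDPS/PBLENDW/VPBLENDD family, whose immediate selects, per
  // element, either the first or the second source (DecodeBLENDMask).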
  case X86::BLENDPDrri:
  case X86::VBLENDPDrri:
  case X86::VBLENDPDYrri:

  case X86::BLENDPDrmi:
  case X86::VBLENDPDrmi:
  case X86::VBLENDPDYrmi:

  case X86::BLENDPSrri:
  case X86::VBLENDPSrri:
  case X86::VBLENDPSYrri:

  case X86::BLENDPSrmi:
  case X86::VBLENDPSrmi:
  case X86::VBLENDPSYrmi:

  case X86::PBLENDWrri:
  case X86::VPBLENDWrri:
  case X86::VPBLENDWYrri:

  case X86::PBLENDWrmi:
  case X86::VPBLENDWrmi:
  case X86::VPBLENDWYrmi:

  case X86::VPBLENDDrri:
  case X86::VPBLENDDYrri:

  case X86::VPBLENDDrmi:
  case X86::VPBLENDDYrmi:
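  // INSERTPS: the immediate encodes a source element, a destination slot and
  // a zero mask; decoded as a v4f32 shuffle (DecodeINSERTPSMask).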
  case X86::INSERTPSrr:
  case X86::VINSERTPSrr:
  case X86::VINSERTPSZrr:

  case X86::INSERTPSrm:
  case X86::VINSERTPSrm:
  case X86::VINSERTPSZrm:
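  // MOVLHPS/MOVHLPS combine the low or high 64-bit halves of the two sources;
  // decoded with DecodeMOVLHPSMask / DecodeMOVHLPSMask.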
  case X86::VMOVLHPSrr:
  case X86::VMOVLHPSZrr:

  case X86::VMOVHLPSrr:
  case X86::VMOVHLPSZrr:
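  // MOVHPD/MOVHPS/MOVLPD/MOVLPS (load forms): insert a 64-bit memory element
  // into the high or low half of the destination while the other half is
  // carried over from the first source.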
  case X86::VMOVHPDZ128rm:

  case X86::VMOVHPSZ128rm:

  case X86::VMOVLPDZ128rm:

  case X86::VMOVLPSZ128rm:
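  // PSLLDQ/PSRLDQ: whole-register byte shifts, decoded as shuffles that shift
  // in zero elements (DecodePSLLDQMask / DecodePSRLDQMask).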
  case X86::VPSLLDQYri:
  case X86::VPSLLDQZ128rr:
  case X86::VPSLLDQZ256rr:
  case X86::VPSLLDQZ512rr:

  case X86::VPSLLDQZ128rm:
  case X86::VPSLLDQZ256rm:
  case X86::VPSLLDQZ512rm:

  case X86::VPSRLDQYri:
  case X86::VPSRLDQZ128rr:
  case X86::VPSRLDQZ256rr:
  case X86::VPSRLDQZ512rr:

  case X86::VPSRLDQZ128rm:
  case X86::VPSRLDQZ256rm:
  case X86::VPSRLDQZ512rm:
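  // MMX variants: PSHUFW uses the same immediate decoding as PSHUFD
  // (DecodePSHUFMask on a 64-bit type); the PUNPCKH*/PUNPCKL* forms interleave
  // the high or low halves of the two sources (DecodeUNPCKHMask /
  // DecodeUNPCKLMask).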
  case X86::MMX_PSHUFWri:

  case X86::MMX_PSHUFWmi:

  case X86::MMX_PUNPCKHBWirr:

  case X86::MMX_PUNPCKHBWirm:

  case X86::MMX_PUNPCKHWDirr:

  case X86::MMX_PUNPCKHWDirm:

  case X86::MMX_PUNPCKHDQirr:

  case X86::MMX_PUNPCKHDQirm:

  case X86::MMX_PUNPCKLBWirr:

  case X86::MMX_PUNPCKLBWirm:

  case X86::MMX_PUNPCKLWDirr:

  case X86::MMX_PUNPCKLWDirm:

  case X86::MMX_PUNPCKLDQirr:

  case X86::MMX_PUNPCKLDQirm:
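  // VPERM2F128/VPERM2I128: the immediate selects 128-bit lanes from either
  // source (DecodeVPERM2X128Mask).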
  case X86::VPERM2F128rr:
  case X86::VPERM2I128rr:

  case X86::VPERM2F128rm:
  case X86::VPERM2I128rm:
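  // Scalar and low-element moves: MOVSS merges the low element into the
  // destination (DecodeScalarMoveMask); the MOVQ/MOVD loads and the MOVZPQILo
  // forms move the low element and zero the rest (DecodeZeroMoveLowMask); and
  // VBROADCASTF128/VBROADCASTI128 splat a 128-bit subvector across the
  // destination (DecodeSubVectorBroadcast).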
  case X86::VMOVSSZrr:

  case X86::VMOVSSZrm:

  case X86::MOVPQI2QIrr:
  case X86::MOVZPQILo2PQIrr:
  case X86::VMOVPQI2QIrr:
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVZPQILo2PQIZrr:

  case X86::MOVQI2PQIrm:
  case X86::VMOVQI2PQIrm:
  case X86::VMOVQI2PQIZrm:

  case X86::MOVDI2PDIrm:
  case X86::VMOVDI2PDIrm:
  case X86::VMOVDI2PDIZrm:

  case X86::VBROADCASTF128:
  case X86::VBROADCASTI128:
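  // After the switch, the decoded ShuffleMask drives the printed comment: the
  // destination name (wrapped by getMaskName) is followed by source[index]
  // terms, and runs of consecutive elements taken from the same source are
  // grouped inside one pair of brackets.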
  if (ShuffleMask.empty())
    return false;

  if (!DestName) DestName = Src1Name;
  OS << (DestName ? getMaskName(MI, DestName, getRegName) : "mem") << " = ";

  if (Src1Name == Src2Name) {
    for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
      if ((int)ShuffleMask[i] >= 0 &&
          ShuffleMask[i] >= (int)e)
        ShuffleMask[i] -= e;
    }
  }

  for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
    bool isSrc1 = ShuffleMask[i] < (int)ShuffleMask.size();
    const char *SrcName = isSrc1 ? Src1Name : Src2Name;
    OS << (SrcName ? SrcName : "mem") << '[';
    bool IsFirst = true;
    while (i != e &&
           (ShuffleMask[i] < (int)ShuffleMask.size()) == isSrc1) {
      OS << ShuffleMask[i] % ShuffleMask.size();
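  // Worked example (illustrative): for ShuffleMask = {0, 5, 2, 7} with
  // Src1Name = "xmm1" and Src2Name = "xmm2", indices below 4 name elements of
  // the first source and the rest are printed modulo the mask size, so the
  // emitted comment reads "xmm1[0],xmm2[1],xmm1[2],xmm2[3]".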
static MVT getVectorVT(MVT VT, unsigned NumElements)
void DecodePSLLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
void DecodeScalarMoveMask(MVT VT, bool IsLoad, SmallVectorImpl<int> &Mask)
    Decode a scalar float move instruction as a shuffle mask.
void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
unsigned getSizeInBits() const
void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
    DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd and punpckl*.
SSE4A Extraction and Insertion.
void DecodeMOVDDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
LLVM_NODISCARD bool empty() const
void DecodeSubVectorBroadcast(MVT DstVT, MVT SrcVT, SmallVectorImpl<int> &ShuffleMask)
    Decodes a broadcast of a subvector to a larger vector type.
void DecodeEXTRQIMask(int Len, int Idx, SmallVectorImpl<int> &ShuffleMask)
    Decode a SSE4A EXTRQ instruction as a v16i8 shuffle mask.
void DecodeINSERTQIMask(int Len, int Idx, SmallVectorImpl<int> &ShuffleMask)
    Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
unsigned getReg() const
    Returns the register number.
void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
    Decode a move lower and zero upper instruction as a shuffle mask.
MCInst - Instances of this class represent a single low-level machine instruction.
void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    Decode a BLEND immediate mask into a shuffle mask (see the usage sketch after this list).
void DecodeMOVSHDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask)
    Decode a MOVLHPS instruction as a v2f64/v4f32 shuffle mask.
MVT - Machine Value Type.
void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    DecodeSHUFPMask - This decodes the shuffle masks for shufp*.
void DecodePSHUFLWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    Decodes the shuffle masks for pshuflw.
void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    Decode a 128-bit INSERTPS instruction as a v4f32 shuffle mask.
void DecodeZeroExtendMask(MVT SrcScalarVT, MVT DstVT, SmallVectorImpl<int> &Mask)
    Decode a zero extension instruction as a shuffle mask.
#define llvm_unreachable(msg)
    Marks that the current location is not supposed to be reachable.
void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
void DecodeVALIGNMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    DecodePSHUFMask - This decodes the shuffle masks for pshufw, pshufd, and vpermilp*.
unsigned getOpcode() const
void DecodePSWAPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
    Decodes a PSWAPD 3DNow! instruction.
void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask)
    DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd and punpckh*.
void DecodeVPERM2X128Mask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
void DecodeInsertElementMask(MVT VT, unsigned Idx, unsigned Len, SmallVectorImpl<int> &ShuffleMask)
void decodeVSHUF64x2FamilyMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    Decode a shuffle of packed values at 128-bit granularity (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2) immed...
void DecodePSHUFHWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    Decodes the shuffle masks for pshufhw.
void DecodeVPERMMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask)
    DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
unsigned getNumOperands() const
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
#define LLVM_FALLTHROUGH
    LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
raw_ostream - This class implements an extremely fast bulk output stream that can only output to a stream...
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask)
    Decode a MOVHLPS instruction as a v2f64/v4f32 shuffle mask.
const MCOperand &getOperand(unsigned i) const
bool EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, const char *(*getRegName)(unsigned))
    EmitAnyX86InstComments - This function decodes x86 instructions and prints newline terminated strings...
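A minimal sketch (not from this file) of how one of the decoders above is
typically driven. The include paths and the demoBlendDecode helper name are
illustrative assumptions; only the DecodeBLENDMask declaration listed above is
taken from this page, and the resulting indices follow the convention used by
the printing loop (values at or above the element count refer to the second
source).

  #include "llvm/ADT/SmallVector.h"            // assumed header location
  #include "llvm/CodeGen/MachineValueType.h"   // assumed header location
  #include "X86ShuffleDecode.h"                // assumed header location

  void demoBlendDecode() {                     // hypothetical helper
    llvm::SmallVector<int, 4> Mask;
    // Immediate 0b0101: bits 0 and 2 are set, so elements 0 and 2 are taken
    // from the second source (x86 BLEND semantics).
    llvm::DecodeBLENDMask(llvm::MVT::v4f32, /*Imm=*/0x5, Mask);
    // Mask now holds {4, 1, 6, 3}; the printing loop above would render it as
    // "src2[0],src1[1],src2[2],src1[3]".
  }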