// getChunk(): extract the 16-bit chunk at index ChunkIdx from a 64-bit value.
assert(ChunkIdx < 4 && "Out of range chunk index specified!");

return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
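
// canUseOrr(): replicate the 16-bit chunk into all four positions; the
// replicated pattern can be materialized with a single ORR if it is a valid
// logical immediate.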
Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
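
// Count how often each 16-bit chunk occurs in the constant.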
for (unsigned Idx = 0; Idx < 4; ++Idx)
  ++Counts[getChunk(UImm, Idx)];
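
// Traverse the chunk counts looking for a value which occurs more than once
// and whose replication is ORR-encodable.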
for (const auto &Chunk : Counts) {
  const uint64_t ChunkVal = Chunk.first;
  const unsigned Count = Chunk.second;
  uint64_t Encoding = 0;
  if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
    continue;

  const bool CountThree = Count == 3;
  Insn.push_back({ AArch64::ORRXri, 0, Encoding });
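
  // Find the first chunk not covered by the replicated ORR value.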
  unsigned ShiftAmt = 0;
  uint64_t Imm16 = 0;
  for (; ShiftAmt < 64; ShiftAmt += 16) {
    Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
    if (Imm16 != ChunkVal)
      break;
  }
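
  // A MOVK patches this chunk. If the replicated chunk occurred three times,
  // that single MOVK already completes the constant; otherwise scan on for
  // the remaining mismatching chunk, which gets a second MOVK.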
  for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
    Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
    if (Imm16 != ChunkVal)
      break;
  }
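
// Worked example (an assumed input, not taken from the source): in
// 0x00FF567800FF00FF the chunk 0x00FF occurs three times and
// 0x00FF00FF00FF00FF is a valid logical immediate, so the expansion is
//   ORR  Xd, XZR, #0x00FF00FF00FF00FF
//   MOVK Xd, #0x5678, lsl #32

// updateImm(): clear or set all bits of the chunk at the given index.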
if (Clear)
  Imm &= ~(Mask << (Idx * 16));
else
  Imm |= Mask << (Idx * 16);
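
// trySequenceOfOnes(): look for a contiguous run of ones in the constant,
// possibly interrupted by one or two chunks. The run is materialized with an
// ORR and the interrupting chunks with MOVKs.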
const int NotSet = -1;
const uint64_t Mask = 0xFFFF;

int StartIdx = NotSet;
int EndIdx = NotSet;
for (int Idx = 0; Idx < 4; ++Idx) {
  int64_t Chunk = getChunk(UImm, Idx);
  // Sign extend the 16-bit chunk to 64 bits.
  Chunk = (Chunk << 48) >> 48;
  if (isStartChunk(Chunk))
    StartIdx = Idx;
  else if (isEndChunk(Chunk))
    EndIdx = Idx;
}

// Early exit in case we can't find a start/end chunk.
if (StartIdx == NotSet || EndIdx == NotSet)
  return false;
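
// If the run of ones wraps around from the MSB into the LSB, swap the indices
// and materialize a contiguous run of zeros surrounded by ones instead.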
uint64_t Outside = 0;   // Chunks outside the run must be all zeros.
uint64_t Inside = Mask; // Chunks inside the run must be all ones.
if (StartIdx > EndIdx) {
  std::swap(StartIdx, EndIdx);
  std::swap(Outside, Inside);
}

uint64_t OrrImm = UImm;
int FirstMovkIdx = NotSet;
int SecondMovkIdx = NotSet;
// Find the chunks which need to be patched up to obtain a contiguous run of
// ones, and remember their indices.
for (int Idx = 0; Idx < 4; ++Idx) {
  const uint64_t Chunk = getChunk(UImm, Idx);

  if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
    OrrImm = updateImm(OrrImm, Idx, Outside == 0);
    if (FirstMovkIdx == NotSet)
      FirstMovkIdx = Idx;
    else
      SecondMovkIdx = Idx;
  } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
    OrrImm = updateImm(OrrImm, Idx, Inside != Mask);
    if (FirstMovkIdx == NotSet)
      FirstMovkIdx = Idx;
    else
      SecondMovkIdx = Idx;
  }
}
assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");
uint64_t Encoding = 0;
AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
Insn.push_back({ AArch64::ORRXri, 0, Encoding });
const bool SingleMovk = SecondMovkIdx == NotSet;
Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, FirstMovkIdx),
                 AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                           FirstMovkIdx * 16) });

// Early exit in case we only need to emit a single MOVK instruction.
if (SingleMovk)
  return true;

Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, SecondMovkIdx),
                 AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                           SecondMovkIdx * 16) });
return true;
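
// expandMOVImmSimple(): materialize Imm with a MOVZ or MOVN of the lowest
// non-trivial chunk, followed by MOVKs for the remaining chunks.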
static void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
                               unsigned OneChunks, unsigned ZeroChunks,
                               SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Use MOVN if the constant has more all-ones than all-zeros chunks.
  bool isNeg = false;
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for high bits with MOVZ/MOVN.
  unsigned LastShift = 0; // LSL amount for the last MOVK.
  if (Imm != 0) {
    unsigned LZ = countLeadingZeros(Imm);
    unsigned TZ = countTrailingZeros(Imm);
    Shift = (TZ / 16) * 16;
    LastShift = ((63 - LZ) / 16) * 16;
  }
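
  // Emit the MOVZ/MOVN for the chunk at Shift, then MOVKs for every later
  // chunk which the first instruction did not already set correctly.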
  unsigned Imm16 = (Imm >> Shift) & Mask;
  Insn.push_back({ FirstOpc, Imm16,
                   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });

  if (Shift == LastShift)
    return;

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift < LastShift) {
    Shift += 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit portion is already set correctly.
    Insn.push_back({ Opc, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
  }
}
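
// Worked example (an assumed input, not taken from the source): for
// 0x0000FEDC00003210, Shift = 0 and LastShift = 32, giving
//   MOVZ Xd, #0x3210
//   MOVK Xd, #0xFEDC, lsl #32
// (the all-zeros chunk at lsl #16 is skipped by the loop above).

// expandMOVImm(): top-level driver which picks the cheapest expansion.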
void expandMOVImm(uint64_t Imm, unsigned BitSize,
                  SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Count the 16-bit chunks which are all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }

  // Prefer MOVZ/MOVN over ORR because of the rules for the "mov" alias.
  if ((BitSize / 16) - OneChunks <= 1 || (BitSize / 16) - ZeroChunks <= 1) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }
  // Try a single ORR of a logical immediate.
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    Insn.push_back({ Opc, 0, Encoding });
    return;
  }
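
  // Prefer MOVZ/MOVN followed by a single MOVK over the two-instruction
  // sequences tried below; it is more readable and likely no slower.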
  if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }
  assert(BitSize == 64 && "All 32-bit immediates can be expanded with a "
                          "MOVZ/MOVK pair.");
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    uint64_t ShiftedMask = (0xFFFFULL << Shift);
    uint64_t ZeroChunk = UImm & ~ShiftedMask;
    uint64_t OneChunk = UImm | ShiftedMask;
    uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
    uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
    if (AArch64_AM::processLogicalImmediate(ZeroChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(OneChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(ReplicateChunk, BitSize,
                                            Encoding)) {
      // ORR the encodable pattern, then overwrite the replaced chunk.
      Insn.push_back({ AArch64::ORRXri, 0, Encoding });
      Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, Shift / 16),
                       AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
      return;
    }
  }
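
  // Fall back to MOVZ/MOVN plus up to two MOVKs whenever at least one chunk
  // is trivially all ones or all zeros.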
  if (OneChunks || ZeroChunks) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }