23 assert(ChunkIdx < 4 &&
"Out of range chunk index specified!");
25 return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
31 Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
54 for (
const auto &Chunk : Counts) {
55 const uint64_t ChunkVal = Chunk.first;
56 const unsigned Count = Chunk.second;
62 if ((Count != 2 && Count != 3) || !
canUseOrr(ChunkVal, Encoding))
65 const bool CountThree = Count == 3;
67 Insn.push_back({ AArch64::ORRXri, 0, Encoding });
69 unsigned ShiftAmt = 0;
72 for (; ShiftAmt < 64; ShiftAmt += 16) {
73 Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
75 if (Imm16 != ChunkVal)
80 Insn.push_back({ AArch64::MOVKXi, Imm16,
89 for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
90 Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
92 if (Imm16 != ChunkVal)
95 Insn.push_back({ AArch64::MOVKXi, Imm16,
107 if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
117 if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
129 Imm &= ~(Mask << (
Idx * 16));
132 Imm |= Mask << (
Idx * 16);
152 const int NotSet = -1;
155 int StartIdx = NotSet;
161 Chunk = (Chunk << 48) >> 48;
170 if (StartIdx == NotSet || EndIdx == NotSet)
181 if (StartIdx > EndIdx) {
187 int FirstMovkIdx = NotSet;
188 int SecondMovkIdx = NotSet;
197 if ((
Idx < StartIdx || EndIdx <
Idx) && Chunk != Outside) {
201 if (FirstMovkIdx == NotSet)
208 }
else if (
Idx > StartIdx &&
Idx < EndIdx && Chunk != Inside) {
212 if (FirstMovkIdx == NotSet)
218 assert(FirstMovkIdx != NotSet &&
"Constant materializable with single ORR!");
223 Insn.push_back({ AArch64::ORRXri, 0, Encoding });
225 const bool SingleMovk = SecondMovkIdx == NotSet;
226 Insn.push_back({ AArch64::MOVKXi,
getChunk(UImm, FirstMovkIdx),
228 FirstMovkIdx * 16) });
235 Insn.push_back({ AArch64::MOVKXi,
getChunk(UImm, SecondMovkIdx),
237 SecondMovkIdx * 16) });
247 UnshiftedOnes = ~0ULL;
249 UnshiftedOnes = (1ULL << NumOnes) - 1;
251 return UnshiftedOnes << StartPosition;
259 uint64_t Rotation = 1ULL << (6 - i);
260 uint64_t Closure = Result | llvm::rotl<uint64_t>(Result, Rotation);
261 if (Closure != (Closure & V)) {
287static std::optional<std::pair<uint64_t, uint64_t>>
289 if (UImm == 0 || ~UImm == 0)
294 uint64_t RotatedBits = llvm::rotr<uint64_t>(UImm, InitialTrailingOnes);
300 uint64_t RemainingBits = RotatedBits & ~MaximalImm1;
307 if (RemainingBits & ~MaximalImm2)
311 return std::make_pair(
rotl(MaximalImm1, InitialTrailingOnes),
312 rotl(MaximalImm2, InitialTrailingOnes));
319 if (MaybeDecomposition == std::nullopt)
321 uint64_t Imm1 = MaybeDecomposition->first;
322 uint64_t Imm2 = MaybeDecomposition->second;
328 if (Imm1Success && Imm2Success) {
330 Insn.push_back({AArch64::ORRXri, 0, Encoding1});
331 Insn.push_back({AArch64::ORRXri, 1, Encoding2});
345 if (MaybeDecomposition == std::nullopt)
347 uint64_t Imm1 = MaybeDecomposition->first;
348 uint64_t Imm2 = MaybeDecomposition->second;
354 if (Imm1Success && Imm2Success) {
356 Insn.push_back({AArch64::ORRXri, 0, Encoding1});
358 Insn.push_back({AArch64::ANDXri, 1, Encoding2});
368 unsigned OneChunks,
unsigned ZeroChunks,
370 const unsigned Mask = 0xFFFF;
379 if (OneChunks > ZeroChunks) {
386 Imm &= (1LL << 32) - 1;
387 FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
389 FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
392 unsigned LastShift = 0;
396 Shift = (TZ / 16) * 16;
397 LastShift = ((63 - LZ) / 16) * 16;
399 unsigned Imm16 = (Imm >> Shift) & Mask;
401 Insn.push_back({ FirstOpc, Imm16,
404 if (Shift == LastShift)
412 unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
413 while (Shift < LastShift) {
415 Imm16 = (Imm >> Shift) & Mask;
416 if (Imm16 == (isNeg ? Mask : 0))
419 Insn.push_back({ Opc, Imm16,
428 const unsigned Mask = 0xFFFF;
432 unsigned OneChunks = 0;
433 unsigned ZeroChunks = 0;
434 for (
unsigned Shift = 0; Shift < BitSize; Shift += 16) {
435 const unsigned Chunk = (Imm >> Shift) & Mask;
443 if ((BitSize / 16) - OneChunks <= 1 || (BitSize / 16) - ZeroChunks <= 1) {
449 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
452 unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
453 Insn.push_back({ Opc, 0, Encoding });
461 if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2) {
466 assert(BitSize == 64 &&
"All 32-bit immediates can be expanded with a"
477 for (
unsigned Shift = 0; Shift < BitSize; Shift += 16) {
478 uint64_t ShiftedMask = (0xFFFFULL << Shift);
479 uint64_t ZeroChunk = UImm & ~ShiftedMask;
480 uint64_t OneChunk = UImm | ShiftedMask;
481 uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
482 uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
488 Insn.push_back({ AArch64::ORRXri, 0, Encoding });
491 const unsigned Imm16 =
getChunk(UImm, Shift / 16);
492 Insn.push_back({ AArch64::MOVKXi, Imm16,
514 if (OneChunks || ZeroChunks) {
static uint64_t GetRunOfOnesStartingAt(uint64_t V, uint64_t StartPosition)
static void expandMOVImmSimple(uint64_t Imm, unsigned BitSize, unsigned OneChunks, unsigned ZeroChunks, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to a MOVZ or MOVN of width BitSize followed by up ...
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear)
Clear or set all bits in the chunk at the given index.
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding)
Check whether the given 16-bit chunk replicated to full 64-bit width can be materialized with an ORR ...
static bool tryToreplicateChunks(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
Check for identical 16-bit chunks within the constant and if so materialize them with a single ORR in...
static bool trySequenceOfOnes(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
Check whether the constant contains a sequence of contiguous ones, which might be interrupted by one ...
static uint64_t MaximallyReplicateSubImmediate(uint64_t V, uint64_t Subset)
static bool tryAndOfLogicalImmediates(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx)
Helper function which extracts the specified 16-bit chunk from a 64-bit value.
static uint64_t maximalLogicalImmWithin(uint64_t RemainingBits, uint64_t OriginalBits)
static bool isStartChunk(uint64_t Chunk)
Check whether this chunk matches the pattern '1...0...'.
static bool isEndChunk(uint64_t Chunk)
Check whether this chunk matches the pattern '0...1...' This pattern ends a contiguous sequence of on...
static bool tryOrrOfLogicalImmediates(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
static std::optional< std::pair< uint64_t, uint64_t > > decomposeIntoOrrOfLogicalImmediates(uint64_t UImm)
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
static bool processLogicalImmediate(uint64_t Imm, unsigned RegSize, uint64_t &Encoding)
processLogicalImmediate - Determine if an immediate value can be encoded as the immediate operand of ...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
This is an optimization pass for GlobalISel generic memory operations.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most, stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least, stopping at the first 1.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
constexpr T rotl(T V, int R)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.