  assert(ChunkIdx < 4 && "Out of range chunk index specified!");

  return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
  Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
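// Illustrative, self-contained sketch of the replication step above (not the
// LLVM helper): a chunk of 0x00ff replicates to 0x00ff00ff00ff00ff, a
// repeating pattern that encodes as an ORR logical immediate, while 0x1234
// replicates to 0x1234123412341234, which does not.
#include <cstdint>
#include <cstdio>

static uint64_t replicateChunk(uint64_t Chunk) {
  return (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
}

int main() {
  std::printf("%016llx\n", (unsigned long long)replicateChunk(0x00ff));
  std::printf("%016llx\n", (unsigned long long)replicateChunk(0x1234));
}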
  for (const auto &Chunk : Counts) {
    const uint64_t ChunkVal = Chunk.first;
    const unsigned Count = Chunk.second;

    uint64_t Encoding = 0;
    // Only chunks occurring two or three times, and themselves encodable as a
    // logical immediate, are interesting.
    if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
      continue;

    const bool CountThree = Count == 3;

    Insn.push_back({ AArch64::ORRXri, 0, Encoding });
    // Find the first chunk not covered by the ORR and patch it with a MOVK.
    unsigned ShiftAmt = 0;
    uint64_t Imm16 = 0;
    for (; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
      if (Imm16 != ChunkVal)
        break;
    }
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });

    // Find the remaining chunk, if any, and patch it with a second MOVK.
    for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
      if (Imm16 != ChunkVal)
        break;
    }
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });
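// Standalone sketch of the chunk counting that feeds the loop above, using
// std::map instead of LLVM's DenseMap, on an illustrative |A|B|A|A| constant
// (the value is made up for the example).
#include <cstdint>
#include <cstdio>
#include <map>

int main() {
  uint64_t UImm = 0x00ff123400ff00ffULL;
  std::map<uint64_t, unsigned> Counts;
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    ++Counts[(UImm >> (Idx * 16)) & 0xFFFF];
  for (const auto &Chunk : Counts)
    std::printf("chunk 0x%04llx occurs %u times\n",
                (unsigned long long)Chunk.first, Chunk.second);
  // 0x00ff occurs three times -> one ORR of 0x00ff00ff00ff00ff;
  // 0x1234 occurs once        -> one MOVK #0x1234, lsl #32.
}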
  // Shared guard of isStartChunk() and isEndChunk(): an all-zero or all-one
  // chunk can neither start nor end a contiguous sequence of ones.
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;
  if (Clear)
    // Clear all bits of the chunk in the immediate.
    Imm &= ~(Mask << (Idx * 16));
  else
    // Set all bits of the chunk in the immediate.
    Imm |= Mask << (Idx * 16);
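// Tiny standalone check of the clear/set behaviour above (illustrative
// reimplementation, not the LLVM function).
#include <cassert>
#include <cstdint>

static uint64_t updateChunk(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t Mask = 0xFFFF;
  return Clear ? Imm & ~(Mask << (Idx * 16)) : Imm | (Mask << (Idx * 16));
}

int main() {
  assert(updateChunk(0x123456789abcdef0ULL, 2, true) == 0x123400009abcdef0ULL);
  assert(updateChunk(0x123456789abcdef0ULL, 2, false) == 0x1234ffff9abcdef0ULL);
}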
  const int NotSet = -1;

  int StartIdx = NotSet;
  int EndIdx = NotSet;
  // Sign extend each 16-bit chunk to 64 bits before classifying it.
  Chunk = (Chunk << 48) >> 48;

  if (StartIdx == NotSet || EndIdx == NotSet)
    return false;
  // If the sequence of ones wraps around from the MSB into the LSB, swap the
  // indices and patch up a sequence of zeros instead.
  if (StartIdx > EndIdx) {
    std::swap(StartIdx, EndIdx);
    std::swap(Outside, Inside);
  }

  int FirstMovkIdx = NotSet;
  int SecondMovkIdx = NotSet;

  // Remember which chunks have to be patched up with a MOVK.
  if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
    // Chunk outside the contiguous sequence of ones.
    if (FirstMovkIdx == NotSet)
      FirstMovkIdx = Idx;
    else
      SecondMovkIdx = Idx;
  } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
    // Chunk inside the contiguous sequence of ones.
    if (FirstMovkIdx == NotSet)
      FirstMovkIdx = Idx;
    else
      SecondMovkIdx = Idx;
  }
  assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");

  Insn.push_back({ AArch64::ORRXri, 0, Encoding });
  const bool SingleMovk = SecondMovkIdx == NotSet;
  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, FirstMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             FirstMovkIdx * 16) });

  // Early exit when a single MOVK is enough.
  if (SingleMovk)
    return true;

  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, SecondMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             SecondMovkIdx * 16) });
  return true;
  // Avoid the undefined shift by 64 when the run of ones covers all 64 bits.
  if (NumOnes == 64)
    UnshiftedOnes = ~0ULL;
  else
    UnshiftedOnes = (1ULL << NumOnes) - 1;
  return UnshiftedOnes << StartPosition;
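// Self-contained sketch of the same run-extraction trick (illustrative, using
// C++20 <bit> rather than llvm::countr_one): count the ones starting at
// StartPosition, build that many low ones, then shift them back into place.
#include <bit>
#include <cassert>
#include <cstdint>

static uint64_t runOfOnesStartingAt(uint64_t V, unsigned StartPosition) {
  unsigned NumOnes = std::countr_one(V >> StartPosition);
  uint64_t UnshiftedOnes =
      NumOnes == 64 ? ~0ULL : (1ULL << NumOnes) - 1; // avoid a shift by 64
  return UnshiftedOnes << StartPosition;
}

int main() {
  assert(runOfOnesStartingAt(0b01111100, 2) == 0b01111100); // run of five ones
  assert(runOfOnesStartingAt(0xff00ff00, 8) == 0x0000ff00); // only the low run
}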
    uint64_t Rotation = 1ULL << (6 - i);
    uint64_t Closure = Result | llvm::rotl<uint64_t>(Result, Rotation);
    // Stop doubling once the replicated pattern would set bits outside of V.
    if (Closure != (Closure & V))
      break;
    Result = Closure;
static std::optional<std::pair<uint64_t, uint64_t>>
decomposeIntoOrrOfLogicalImmediates(uint64_t UImm) {
  if (UImm == 0 || ~UImm == 0)
    return std::nullopt;
  // Rotate so that no run of ones straddles the bit 63 / bit 0 boundary.
  uint64_t RotatedBits = llvm::rotr<uint64_t>(UImm, InitialTrailingOnes);
  // Strip the bits covered by the first, maximal logical immediate.
  uint64_t RemainingBits = RotatedBits & ~MaximalImm1;
  // Give up if the second logical immediate still leaves bits uncovered.
  if (RemainingBits & ~MaximalImm2)
    return std::nullopt;
  // Undo the initial rotation on both halves of the decomposition.
  return std::make_pair(rotl(MaximalImm1, InitialTrailingOnes),
                        rotl(MaximalImm2, InitialTrailingOnes));
  if (MaybeDecomposition == std::nullopt)
    return false;
  uint64_t Imm1 = MaybeDecomposition->first;
  uint64_t Imm2 = MaybeDecomposition->second;

  if (Imm1Success && Imm2Success) {
    // Materialize Imm1 with an ORR, then ORR Imm2 on top of it.
    Insn.push_back({AArch64::ORRXri, 0, Encoding1});
    Insn.push_back({AArch64::ORRXri, 1, Encoding2});
    return true;
  }
  if (MaybeDecomposition == std::nullopt)
    return false;
  // The decomposition was computed for ~UImm (De Morgan), so complement the
  // two halves to obtain the operands of the AND.
  uint64_t Imm1 = ~MaybeDecomposition->first;
  uint64_t Imm2 = ~MaybeDecomposition->second;

  if (Imm1Success && Imm2Success) {
    // Materialize Imm1 (the LHS of the AND), then AND it with Imm2.
    Insn.push_back({AArch64::ORRXri, 0, Encoding1});
    Insn.push_back({AArch64::ANDXri, 1, Encoding2});
    return true;
  }
  // Find the repetition size of Imm: halve BigSize while both halves match.
  unsigned BigSize = 64;
  do {
    BigSize /= 2;
    uint64_t Mask = (1ULL << BigSize) - 1;
    if ((Imm & Mask) != ((Imm >> BigSize) & Mask)) {
      BigSize *= 2;
      break;
    }
  } while (BigSize > 2);
  // A bit starts a run of ones if it is set and the next lower bit
  // (cyclically) is clear.
  uint64_t RunStarts = Imm & ~rotl<uint64_t>(Imm, 1);

  // Count the runs of ones within one BigSize-sized chunk.
  int RunsPerBigChunk = popcount(RunStarts & BigMask);
  static const int8_t BigToSmallSizeTable[32] = {
      -1, -1, 0,  1,  2,  2,  -1, 3,  3,  3,  -1, -1, -1, -1, -1, 4,
      4,  4,  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5,
  };

  int BigToSmallShift = BigToSmallSizeTable[RunsPerBigChunk];

  // Early exit if this many runs cannot come from a repeated smaller chunk.
  if (BigToSmallShift == -1)
    return false;

  unsigned SmallSize = BigSize >> BigToSmallShift;
  // 64-bit values with one bit set every (1 << index) bits.
  static const uint64_t RepeatedOnesTable[] = {
      0xffffffffffffffff, 0x5555555555555555, 0x1111111111111111,
      0x0101010101010101, 0x0001000100010001, 0x0000000100000001,
      0x0000000000000001,
  };
  uint64_t RotatedImm = rotr<uint64_t>(Imm, Rotation);
  for (int Attempt = 0; Attempt < 3; ++Attempt) {
    // Candidate "small" immediate: RunLength ones repeated every SmallSize
    // bits, rotated back into position.
    uint64_t SmallImm =
        rotl<uint64_t>((SmallOnes << RunLength) - SmallOnes, Rotation);

    // If both this immediate and its XOR with Imm encode as logical
    // immediates, materialize the constant with ORR followed by EOR.
    Insn.push_back({AArch64::ORRXri, 0, SmallEncoding});
    Insn.push_back({AArch64::EORXri, 1, BigEncoding});

    // Otherwise rotate round to the start of the next run of ones and retry.
    Rotation += countr_zero(rotr<uint64_t>(RunStarts, Rotation) & ~1);
    RotatedImm = rotr<uint64_t>(Imm, Rotation);
  }
static void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
                               unsigned OneChunks, unsigned ZeroChunks,
                               SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;
  // Prefer MOVN when more chunks are all-ones than all-zeros.
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }
  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for the leading MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for the last MOVK
  Shift = (TZ / 16) * 16;            // TZ: trailing zero count of Imm
  LastShift = ((63 - LZ) / 16) * 16; // LZ: leading zero count of Imm

  unsigned Imm16 = (Imm >> Shift) & Mask;
  Insn.push_back({ FirstOpc, Imm16,
                   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });

  // Done if a single MOVZ/MOVN already covers the whole constant.
  if (Shift == LastShift)
    return;
  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift < LastShift) {
    Shift += 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // this chunk is already correct after the MOVZ/MOVN
    Insn.push_back({ Opc, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
  }
  // When the two 32-bit halves of Imm are identical, the high half can be
  // produced by copying the low half with a shifted ORR instead of more MOVKs.
  if (Insn.size() > 2 && (Imm >> 32) == (Imm & 0xffffffffULL)) {
    Insn.push_back({AArch64::ORRXrs, 0, 32});
  }
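// Illustrative standalone walk-through (made-up value, not LLVM code): for
// Imm = 0x0000123400005678 the simple expansion is a MOVZ of the low chunk
// followed by one MOVK, with the two all-zero chunks skipped.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t Imm = 0x0000123400005678ULL;
  bool First = true;
  for (unsigned Shift = 0; Shift < 64; Shift += 16) {
    unsigned Imm16 = (Imm >> Shift) & 0xFFFF;
    if (!First && Imm16 == 0)
      continue; // already zero after the MOVZ
    std::printf("%s x0, #0x%x, lsl #%u\n", First ? "movz" : "movk", Imm16,
                Shift);
    First = false;
  }
  // Prints: movz x0, #0x5678, lsl #0
  //         movk x0, #0x1234, lsl #32
}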
  const unsigned Mask = 0xFFFF;

  // Count the 16-bit chunks that are entirely ones or entirely zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }
  // Prefer a single MOVZ/MOVN when at most one chunk needs a MOVK.
  if ((BitSize / 16) - OneChunks <= 1 || (BitSize / 16) - ZeroChunks <= 1) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }
  // Next, try a single ORR with a logical immediate.
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
  Insn.push_back({ Opc, 0, Encoding });
  // Prefer MOVZ/MOVN followed by up to two MOVKs over other sequences.
  if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }
  assert(BitSize == 64 && "All 32-bit immediates can be expanded with a"
                          "MOVZ/MOVK pair.");
  // 64-bit ORR followed by MOVK: zero, fill, or replicate the patched chunk.
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    uint64_t ShiftedMask = (0xFFFFULL << Shift);
    uint64_t ZeroChunk = UImm & ~ShiftedMask;
    uint64_t OneChunk = UImm | ShiftedMask;
    uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
    uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
    // If any of the three candidates is encodable as a logical immediate,
    // emit the ORR and patch the remaining chunk with a MOVK.
    Insn.push_back({ AArch64::ORRXri, 0, Encoding });
    const unsigned Imm16 = getChunk(UImm, Shift / 16);
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
  }

  // Three-instruction sequences: prefer MOVZ/MOVN followed by two MOVKs when
  // any chunk is all-zeros or all-ones.
  if (OneChunks || ZeroChunks) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }
static uint64_t GetRunOfOnesStartingAt(uint64_t V, uint64_t StartPosition)
static void expandMOVImmSimple(uint64_t Imm, unsigned BitSize, unsigned OneChunks, unsigned ZeroChunks, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to a MOVZ or MOVN of width BitSize followed by up to 3 MOVK instructions.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear)
Clear or set all bits in the chunk at the given index.
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding)
Check whether the given 16-bit chunk replicated to full 64-bit width can be materialized with an ORR instruction.
static bool tryToreplicateChunks(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
Check for identical 16-bit chunks within the constant and if so materialize them with a single ORR instruction.
static bool trySequenceOfOnes(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
Check whether the constant contains a sequence of contiguous ones, which might be interrupted by one or two chunks.
static uint64_t MaximallyReplicateSubImmediate(uint64_t V, uint64_t Subset)
static bool tryAndOfLogicalImmediates(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx)
Helper function which extracts the specified 16-bit chunk from a 64-bit value.
static bool tryEorOfLogicalImmediates(uint64_t Imm, SmallVectorImpl< ImmInsnModel > &Insn)
static uint64_t maximalLogicalImmWithin(uint64_t RemainingBits, uint64_t OriginalBits)
static bool isStartChunk(uint64_t Chunk)
Check whether this chunk matches the pattern '1...0...'.
static bool isEndChunk(uint64_t Chunk)
Check whether this chunk matches the pattern '0...1...'. This pattern ends a contiguous sequence of ones.
static bool tryOrrOfLogicalImmediates(uint64_t UImm, SmallVectorImpl< ImmInsnModel > &Insn)
static std::optional< std::pair< uint64_t, uint64_t > > decomposeIntoOrrOfLogicalImmediates(uint64_t UImm)
static bool processLogicalImmediate(uint64_t Imm, unsigned RegSize, uint64_t &Encoding)
processLogicalImmediate - Determine if an immediate value can be encoded as the immediate operand of a logical instruction for the given register size.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount; shifter: 000 ==> lsl, 001 ==> lsr, 010 ==> asr, 011 ==> ror, 111 ==> msl.
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to synthesize the immediate.
int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64-bit version).
constexpr T rotl(T V, int R)
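Tying the entries above together, a hedged usage sketch of expandMOVImm as declared above. The ImmInsnModel field names (Opcode, Op1, Op2) and the target-private header name AArch64ExpandImm.h are assumptions taken from the companion header, not something shown on this page.

#include "AArch64ExpandImm.h" // target-private header (assumed name)
#include "llvm/ADT/SmallVector.h"
#include <cstdint>
#include <cstdio>

// Expand a 64-bit constant and dump the opcode/operand triples the expander
// chose; the Opcode/Op1/Op2 field names are assumed.
static void dumpExpansion(uint64_t Imm) {
  llvm::SmallVector<llvm::AArch64_IMM::ImmInsnModel, 4> Insn;
  llvm::AArch64_IMM::expandMOVImm(Imm, /*BitSize=*/64, Insn);
  for (const auto &I : Insn)
    std::printf("opcode %u  op1 0x%llx  op2 0x%llx\n", I.Opcode,
                (unsigned long long)I.Op1, (unsigned long long)I.Op2);
}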