18 namespace RISCVMatInt {
28 int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
29 int64_t Lo12 = SignExtend64<12>(Val);
34 if (Lo12 || Hi20 == 0) {
35 unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
41 assert(IsRV64 &&
"Can't emit >32-bit imm for non-RV64 target");
66 int64_t Lo12 = SignExtend64<12>(Val);
67 int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
69 Hi52 =
SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
79 int PlatRegSize = IsRV64 ? 64 : 32;
84 for (
unsigned ShiftVal = 0; ShiftVal <
Size; ShiftVal += PlatRegSize) {
88 Cost += MatSeq.
size();
This class represents lattice values for constants.
void push_back(const T &Elt)
int64_t getSExtValue() const
Get the sign-extended value.
APInt sextOrTrunc(unsigned width) const
Sign-extend or truncate to the given width.
T findFirstSet(T Val, ZeroBehavior ZB=ZB_Max)
Get the index of the first set bit starting from the least significant bit.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
constexpr bool isInt< 32 >(int64_t x)
Align max(MaybeAlign Lhs, Align Rhs)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Class for arbitrary precision integers.
int getIntMatCost(const APInt &Val, unsigned Size, bool IsRV64)
void generateInstSeq(int64_t Val, bool IsRV64, InstSeq &Res)
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())