Go to the documentation of this file.
25 auto getU = [](
uint64_t N) {
return N >> 32; };
26 auto getL = [](
uint64_t N) {
return N & UINT32_MAX; };
30 uint64_t P1 = UL * UR,
P2 = UL * LR, P3 = LL * UR, P4 = LL * LR;
35 uint64_t NewLower = Lower + (getL(
N) << 32);
36 Upper += getU(
N) + (NewLower < Lower);
44 return std::make_pair(Lower, 0);
48 int Shift = 64 - LeadingZeros;
50 Upper = Upper << LeadingZeros | Lower >>
Shift;
52 Shift && (Lower & UINT64_C(1) << (
Shift - 1)));
59 assert(Dividend &&
"expected non-zero dividend");
60 assert(Divisor &&
"expected non-zero divisor");
69 uint64_t Quotient = Dividend64 / Divisor;
70 uint64_t Remainder = Dividend64 % Divisor;
73 if (Quotient > UINT32_MAX)
74 return getAdjusted<uint32_t>(Quotient,
Shift);
77 return getRounded<uint32_t>(Quotient,
Shift, Remainder >=
getHalf(Divisor));
82 assert(Dividend &&
"expected non-zero dividend");
83 assert(Divisor &&
"expected non-zero divisor");
94 return std::make_pair(Dividend,
Shift);
103 uint64_t Quotient = Dividend / Divisor;
107 while (!(Quotient >> 63) && Dividend) {
109 bool IsOverflow = Dividend >> 63;
115 if (IsOverflow || Divisor <= Dividend) {
125 assert(ScaleDiff >= 0 &&
"wrong argument order");
126 assert(ScaleDiff < 64 &&
"numbers too far apart");
128 uint64_t L_adjusted = L >> ScaleDiff;
134 return L > L_adjusted << ScaleDiff ? 1 : 0;
169 int Shift = 63 - (NewE -
E);
177 unsigned AdjustedE =
E + 16383;
187 Float.toString(Chars, Precision, 0);
188 return std::string(Chars.begin(), Chars.end());
192 size_t NonZero = Float.find_last_not_of(
'0');
193 assert(NonZero != std::string::npos &&
"no . in floating point string");
195 if (Float[NonZero] ==
'.')
198 return Float.substr(0, NonZero + 1);
202 unsigned Precision) {
221 }
else if (
E > -64) {
223 Below0 =
D << (64 +
E);
224 }
else if (
E == -64) {
227 }
else if (
E > -120) {
228 Below0 =
D >> (-
E - 64);
229 Extra =
D << (128 +
E);
230 ExtraShift = -64 -
E;
234 if (!Above0 && !Below0)
239 size_t DigitsOut = 0;
242 DigitsOut = Str.size();
257 Extra = (Below0 & 0xf) << 56 | (Extra >> 8);
260 size_t AfterDot = Str.size();
270 Below0 += (Extra >> 60);
274 if (DigitsOut || Str.back() !=
'0')
277 }
while (
Error && (Below0 << 4 | Extra >> 60) >=
Error / 2 &&
278 (!Precision || DigitsOut <= Precision || SinceDot < 2));
281 if (!Precision || DigitsOut <= Precision)
286 std::max(Str.size() - (DigitsOut - Precision), AfterDot + 1);
289 if (Truncate >= Str.size())
297 for (std::string::reverse_iterator
I(Str.begin() + Truncate),
E = Str.rend();
316 int Width,
unsigned Precision) {
static bool doesRoundUp(char Digit)
This is an optimization pass for GlobalISel generic memory operations.
static void appendDigit(std::string &Str, unsigned D)
static const fltSemantics & x87DoubleExtended() LLVM_READNONE
const int32_t MaxScale
Maximum scale; same as APFloat for easy debug printing.
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
const int32_t MinScale
Minimum scale; same as APFloat for easy debug printing.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static uint64_t getHalf(uint64_t N)
This might compile to this xmm1 xorps xmm0 movss xmm0 ret Now consider if the code caused xmm1 to get spilled This might produce this xmm1 movaps xmm0 movaps xmm1 movss xmm0 ret since the reload is only used by these we could fold it into the producing something like xmm1 movaps xmm0 ret saving two instructions The basic idea is that a reload from a spill if only one byte chunk is bring in zeros the one element instead of elements This can be used to simplify a variety of shuffle where the elements are fixed zeros This code generates ugly probably due to costs being off or< 4 x float > * P2
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This class implements an extremely fast bulk output stream that can only output to a stream.
std::pair< uint32_t, int16_t > divide32(uint32_t Dividend, uint32_t Divisor)
Divide two 32-bit integers to create a 32-bit scaled number.
unsigned countTrailingZeros(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
static raw_ostream & print(raw_ostream &OS, uint64_t D, int16_t E, int Width, unsigned Precision)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static void appendNumber(std::string &Str, uint64_t N)
static std::string toStringAPFloat(uint64_t D, int E, unsigned Precision)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::pair< DigitsT, int16_t > getRounded(DigitsT Digits, int16_t Scale, bool ShouldRound)
Conditionally round up a scaled number.
Class for arbitrary precision integers.
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr)
static std::string stripTrailingZeros(const std::string &Float)
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
int compareImpl(uint64_t L, uint64_t R, int ScaleDiff)
Implementation for comparing scaled numbers.
static std::string toString(uint64_t D, int16_t E, int Width, unsigned Precision)
std::pair< uint64_t, int16_t > multiply64(uint64_t LHS, uint64_t RHS)
Multiply two 64-bit integers to create a 64-bit scaled number.
const char * toString(DWARFSectionKind Kind)
Lightweight error class with error context and mandatory checking.
static int countLeadingZeros64(uint64_t N)
std::pair< uint64_t, int16_t > divide64(uint64_t Dividend, uint64_t Divisor)
Divide two 64-bit integers to create a 64-bit scaled number.
unsigned countLeadingZeros(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
auto reverse(ContainerTy &&C)
static void dump(uint64_t D, int16_t E, int Width)