Bug Summary

File: compiler-rt/lib/builtins/fp_div_impl.inc
Warning: line 233, column 3
Value stored to 'x_UQ0' is never read

Annotated Source Code


clang -cc1 -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name divdf3.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -target-cpu i686 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/projects/compiler-rt/lib/builtins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D VISIBILITY_HIDDEN -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/projects/compiler-rt/lib/builtins -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/compiler-rt/lib/builtins -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -Wno-unused-parameter -std=c11 -fconst-strings -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/projects/compiler-rt/lib/builtins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/compiler-rt/lib/builtins/divdf3.c
1//===-- fp_div_impl.inc - Floating point division -----------------*- C -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements soft-float division with the IEEE-754 default
10// rounding (to nearest, ties to even).
11//
12//===----------------------------------------------------------------------===//
13
14#include "fp_lib.h"
15
16// The __divXf3__ function implements Newton-Raphson floating point division.
17// It uses 3 iterations for float32, 4 for float64, and 5 for float128,
18// respectively. Because the number of significant bits roughly doubles
19// every iteration, two modes are supported: N full-width iterations (the
20// default for float32) and (N-1) half-width iterations plus one final
21// full-width iteration. Half-width integer operations (w.r.t. the rep_t
22// size) are expected to be faster on some hardware, but they require the
23// error estimations to be computed separately, due to the larger
24// computational errors caused by truncating intermediate results.
25
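The recurrence being implemented can be sketched in plain floating point before diving into the fixed-point version below (an illustrative standalone snippet; nr_reciprocal is a hypothetical name, not part of this file):

  #include <stdio.h>

  // Newton-Raphson reciprocal: x_{n+1} = x_n * (2 - b * x_n), started
  // from the minimax initial guess x0 = 3/4 + 1/sqrt(2) - b/2 used below.
  static double nr_reciprocal(double b, int iterations) {
    double x = 0.75 + 0.7071067811865476 - b / 2.0;
    for (int i = 0; i < iterations; ++i)
      x = x * (2.0 - b * x); // the number of correct bits roughly doubles
    return x;
  }

  int main(void) {
    // b lies in [1.0, 2.0), like a normalized significand.
    printf("%.17g\n", nr_reciprocal(1.5, 4)); // ~0.66666666666666666 = 1/1.5
    return 0;
  }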
26// Half the bit-size of rep_t
27#define HW (typeWidth / 2)
28// rep_t-sized bitmask with lower half of bits set to ones
29#define loMask (REP_C(-1) >> HW)
30
31#if NUMBER_OF_FULL_ITERATIONS < 1
32#error At least one full iteration is required
33#endif
34
35static __inline fp_t __divXf3__(fp_t a, fp_t b) {
36
37 const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
38 const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
39 const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
40
41 rep_t aSignificand = toRep(a) & significandMask;
42 rep_t bSignificand = toRep(b) & significandMask;
43 int scale = 0;
44
45 // Detect if a or b is zero, denormal, infinity, or NaN.
46 if (aExponent - 1U >= maxExponent - 1U ||
47 bExponent - 1U >= maxExponent - 1U) {
48
49 const rep_t aAbs = toRep(a) & absMask;
50 const rep_t bAbs = toRep(b) & absMask;
51
52 // NaN / anything = qNaN
53 if (aAbs > infRep)
54 return fromRep(toRep(a) | quietBit);
55 // anything / NaN = qNaN
56 if (bAbs > infRep)
57 return fromRep(toRep(b) | quietBit);
58
59 if (aAbs == infRep) {
60 // infinity / infinity = NaN
61 if (bAbs == infRep)
62 return fromRep(qnanRep);
63 // infinity / anything else = +/- infinity
64 else
65 return fromRep(aAbs | quotientSign);
66 }
67
68 // anything else / infinity = +/- 0
69 if (bAbs == infRep)
70 return fromRep(quotientSign);
71
72 if (!aAbs) {
73 // zero / zero = NaN
74 if (!bAbs)
75 return fromRep(qnanRep);
76 // zero / anything else = +/- zero
77 else
78 return fromRep(quotientSign);
79 }
80 // anything else / zero = +/- infinity
81 if (!bAbs)
82 return fromRep(infRep | quotientSign);
83
84 // One or both of a or b is denormal. The other (if applicable) is a
85 // normal number. Renormalize one or both of a and b, and set scale to
86 // include the necessary exponent adjustment.
87 if (aAbs < implicitBit)
88 scale += normalize(&aSignificand);
89 if (bAbs < implicitBit)
90 scale -= normalize(&bSignificand);
91 }
92
93 // Set the implicit significand bit. If we fell through from the
94 // denormal path it was already set by normalize( ), but setting it twice
95 // won't hurt anything.
96 aSignificand |= implicitBit;
97 bSignificand |= implicitBit;
98
99 int writtenExponent = (aExponent - bExponent + scale) + exponentBias;
100
101 const rep_t b_UQ1 = bSignificand << (typeWidth - significandBits - 1);
102
103 // Align the significand of b as a UQ1.(n-1) fixed-point number in the range
104 // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax
105 // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2.
106 // The max error for this approximation is achieved at endpoints, so
107 // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289...,
108 // which is about 4.5 bits.
109 // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571...
110
111 // Then, refine the reciprocal estimate using a quadratically converging
112 // Newton-Raphson iteration:
113 // x_{n+1} = x_n * (2 - x_n * b)
114 //
115 // Let b be the original divisor considered "in infinite precision" and
116 // obtained from IEEE754 representation of function argument (with the
117 // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in
118 // UQ1.(W-1).
119 //
120 // Let b_hw be an infinitely precise number obtained from the highest (HW-1)
121 // bits of divisor significand (with the implicit bit set). Corresponds to
122 // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated**
123 // version of b_UQ1.
124 //
125 // Let e_n := x_n - 1/b_hw
126 // E_n := x_n - 1/b
127 // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b)
128 // = abs(e_n) + (b - b_hw) / (b*b_hw)
129 // <= abs(e_n) + 2 * 2^-HW
130
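To make the UQ1.(W-1) encoding concrete, here is a standalone check assuming the double-precision layout (W = 64, significandBits = 52): b = 1.5 has binary significand 1.1, which lands in the two highest bits of rep_t.

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    const int significandBits = 52, typeWidth = 64;
    // Significand of 1.5 with the implicit bit set explicitly.
    uint64_t bSignificand = ((uint64_t)1 << 52) | ((uint64_t)1 << 51);
    // The same shift as for b_UQ1 below: align the integer bit to the top.
    uint64_t b_UQ1 = bSignificand << (typeWidth - significandBits - 1);
    printf("0x%016llX\n", (unsigned long long)b_UQ1); // 0xC000000000000000
    return 0;
  }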
131 // rep_t-sized iterations may be slower than the corresponding half-width
132 // variant depending on the hardware and whether single/double/quad precision
133 // is selected.
134 // NB: Using half-width iterations increases computation errors due to
135 // rounding, so error estimations have to be computed taking the selected
136 // mode into account!
137#if NUMBER_OF_HALF_ITERATIONS > 0
138 // Starting with (n-1) half-width iterations
139 const half_rep_t b_UQ1_hw = bSignificand >> (significandBits + 1 - HW);
140
141 // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW
142 // with W0 being either 16 or 32 and W0 <= HW.
143 // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which
144 // b/2 is subtracted to obtain x0) wrapped to [0, 1) range.
145#if defined(SINGLE_PRECISION)
146 // Use 16-bit initial estimation in case we are using half-width iterations
147 // for float32 division. This is expected to be useful for some 16-bit
148 // targets. Not used by default as it requires performing more work during
149 // rounding and would hardly help on regular 32- or 64-bit targets.
150 const half_rep_t C_hw = HALF_REP_C(0x7504);
151#else
152 // HW is at least 32. Shifting into the highest bits if needed.
153 const half_rep_t C_hw = HALF_REP_C(0x7504F333) << (HW - 32);
154#endif
155
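The 0x7504F333 constant can be reproduced mechanically; a quick standalone check of the 32-bit variant (truncation to 32 fractional bits, as described above):

  #include <math.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    // (3/4 + 1/sqrt(2)) - 1, i.e. the constant wrapped to [0, 1).
    double c = 0.75 + 1.0 / sqrt(2.0) - 1.0;
    // Truncate to 32 fractional bits as UQ0.32.
    printf("0x%08X\n", (uint32_t)(c * 4294967296.0)); // prints 0x7504F333
    return 0;
  }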
156 // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572,
157 // so x0 fits to UQ0.HW without wrapping.
158 half_rep_t x_UQ0_hw = C_hw - (b_UQ1_hw /* exact b_hw/2 as UQ0.HW */);
159 // An e_0 error is comprised of errors due to
160 // * x0 being an inherently imprecise first approximation of 1/b_hw
161 // * C_hw being some (irrational) number **truncated** to W0 bits
162 // Please note that e_0 is calculated against the infinitely precise
163 // reciprocal of b_hw (that is, **truncated** version of b).
164 //
165 // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0
166
167 // By construction, 1 <= b < 2
168 // f(x) = x * (2 - b*x) = 2*x - b*x^2
169 // f'(x) = 2 * (1 - b*x)
170 //
171 // On the [0, 1] interval, f(0)   = 0,
172 // then it increases until f(1/b) = 1 / b, maximum on (0, 1),
173 // then it decreases to    f(1)   = 2 - b
174 //
175 // Let g(x) = x - f(x) = b*x^2 - x.
176 // On (0, 1/b), g(x) < 0 <=> f(x) > x
177 // On (1/b, 1], g(x) > 0 <=> f(x) < x
178 //
179 // For half-width iterations, b_hw is used instead of b.
180 REPEAT_N_TIMES(NUMBER_OF_HALF_ITERATIONS, {
181 // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp
182 // of corr_UQ1_hw.
183 // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1).
184 // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided
185 // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is
186 // expected to be strictly positive because b_UQ1_hw has its highest bit set
187 // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1).
188 half_rep_t corr_UQ1_hw = 0 - ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW);
189
190 // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
191 // obtaining an UQ1.(HW-1) number and proving its highest bit could be
192 // considered to be 0 to be able to represent it in UQ0.HW.
193 // From the above analysis of f(x), if corr_UQ1_hw were represented
194 // without any intermediate loss of precision (that is, in twice_rep_t),
195 // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly
196 // less otherwise. On the other hand, to obtain [1.]000..., one has to pass
197 // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
198 // to 1.0 being not representable as UQ0.HW).
199 // The fact that corr_UQ1_hw was virtually rounded up (because the result of
200 // the multiplication is **first** truncated, then negated - to improve
201 // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
202 x_UQ0_hw = (rep_t)x_UQ0_hw * corr_UQ1_hw >> (HW - 1);
203 // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t
204 // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after
205 // any number of iterations, so just subtract 2 from the reciprocal
206 // approximation after the last iteration.
207
208 // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
209 // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
210 //             = 1 - e_n * b_hw + 2*eps1
211 // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
212 //          = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
213 //          = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
214 // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
215 //         = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
216 //                       \------ >0 -------/   \-- >0 ---/
217 // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
218 })
219 // For initial half-width iterations, U = 2^-HW
220 // Let abs(e_n) <= u_n * U,
221 // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U)
222 // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2)
223
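To watch the loop above converge, here is a standalone trace assuming the float32 half-width configuration (rep_t = uint32_t, half_rep_t = uint16_t, HW = 16, C_hw = 0x7504) with b = 1.5; the printed estimates approach 1/1.5 = 0.6667:

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    const uint16_t b_UQ1_hw = 0xC000; // 1.5 as UQ1.15 (equivalently 0.75 as UQ0.16)
    uint16_t x_UQ0_hw = (uint16_t)(0x7504 - b_UQ1_hw); // x0; wraps as intended
    for (int n = 0; n < 2; ++n) {
      // Same body as the REPEAT_N_TIMES loop above.
      uint16_t corr_UQ1_hw =
          (uint16_t)(0 - ((uint32_t)x_UQ0_hw * b_UQ1_hw >> 16));
      x_UQ0_hw = (uint16_t)((uint32_t)x_UQ0_hw * corr_UQ1_hw >> 15);
      printf("x_%d ~ %.6f\n", n + 1, x_UQ0_hw / 65536.0); // 0.664215, 0.666656
    }
    return 0;
  }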
224 // Account for possible overflow (see above). For an overflow to occur for the
225 // first time, for "ideal" corr_UQ1_hw (that is, without intermediate
226 // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either the
227 // maximum value representable in UQ0.HW or one less. This means that 1/b_hw
228 // must not be below that value (see g(x) above), so it is safe to decrement
229 // just once after the final iteration. On the other hand, the effective value
230 // of the divisor changes after this point (from b_hw to b), so adjust here.
231 x_UQ0_hw -= 1U;
232 rep_t x_UQ0 = (rep_t)x_UQ0_hw << HW;
233 x_UQ0 -= 1U;
Value stored to 'x_UQ0' is never read
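For context on the warning: in the configuration analyzed here (divdf3.c, i.e. DOUBLE_PRECISION with NUMBER_OF_HALF_ITERATIONS == 3, NUMBER_OF_FULL_ITERATIONS == 1, and USE_NATIVE_FULL_ITERATIONS not defined), the decremented value would only be read by the native full-width loop at lines 247-250, which is preprocessed away. The emulated full iteration instead carries the identity x_UQ0 = x_UQ0_hw * 2^HW - 1 symbolically (the "- b_UQ1" and "- REP_C(1)" adjustments below) and unconditionally reassigns x_UQ0 at line 278 before any read, so the store at line 233 is dead on this path.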
234
235#else
236 // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n
237 const rep_t C = REP_C(0x7504F333) << (typeWidth - 32);
238 rep_t x_UQ0 = C - b_UQ1;
239 // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32
240#endif
241
242 // Error estimations for full-precision iterations are calculated just
243 // as above, but with U := 2^-W and taking extra decrementing into account.
244 // We need at least one such iteration.
245
246#ifdef USE_NATIVE_FULL_ITERATIONS
247 REPEAT_N_TIMES(NUMBER_OF_FULL_ITERATIONS, {
248 rep_t corr_UQ1 = 0 - ((twice_rep_t)x_UQ0 * b_UQ1 >> typeWidth);
249 x_UQ0 = (twice_rep_t)x_UQ0 * corr_UQ1 >> (typeWidth - 1);
250 })
251#else
252#if NUMBER_OF_FULL_ITERATIONS != 1
253#error Only a single emulated full iteration is supported
254#endif
255#if !(NUMBER_OF_HALF_ITERATIONS > 0)
256 // Cannot normally reach here: only one full-width iteration is requested and
257 // the total number of iterations should be at least 3 even for float32.
258#error Check NUMBER_OF_HALF_ITERATIONS, NUMBER_OF_FULL_ITERATIONS and USE_NATIVE_FULL_ITERATIONS.
259#endif
260 // Simulating operations on a twice_rep_t to perform a single final full-width
261 // iteration. Using ad-hoc multiplication implementations to take advantage
262 // of the particular structure of the operands.
263 rep_t blo = b_UQ1 & loMask;
264 // x_UQ0 = x_UQ0_hw * 2^HW - 1
265 // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1
266 //
267 //   <--- higher half ---><--- lower half --->
268 //   [x_UQ0_hw * b_UQ1_hw]
269 // +            [  x_UQ0_hw *  blo  ]
270 // -                      [      b_UQ1       ]
271 // = [      result       ][.... discarded ...]
272 rep_t corr_UQ1 = 0U - ( (rep_t)x_UQ0_hw * b_UQ1_hw
273 + ((rep_t)x_UQ0_hw * blo >> HW)
274 - REP_C(1)); // account for *possible* carry
275 rep_t lo_corr = corr_UQ1 & loMask;
276 rep_t hi_corr = corr_UQ1 >> HW;
277 // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
278 x_UQ0 = ((rep_t)x_UQ0_hw * hi_corr << 1)
279 + ((rep_t)x_UQ0_hw * lo_corr >> (HW - 1))
280 - REP_C(2); // 1 to account for the highest bit of corr_UQ1 possibly being 1
281 // 1 to account for possible carry
282 // Just like the case of half-width iterations but with possibility
283 // of overflowing by one extra Ulp of x_UQ0.
284 x_UQ0 -= 1U;
285 // ... and then traditional fixup by 2 should work
286
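The half-word splitting used above is easier to see in its generic form; a standalone sketch that recovers the high 64 bits of a 64x64-bit product from 32-bit halves (umulh is a hypothetical helper; the cross-check assumes a compiler that provides unsigned __int128):

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t umulh(uint64_t a, uint64_t b) {
    uint64_t alo = a & 0xFFFFFFFFu, ahi = a >> 32;
    uint64_t blo = b & 0xFFFFFFFFu, bhi = b >> 32;
    // Sum of the three partial contributions that can carry into the high half.
    uint64_t mid = (alo * blo >> 32) + (ahi * blo & 0xFFFFFFFFu)
                 + (alo * bhi & 0xFFFFFFFFu);
    return ahi * bhi + (ahi * blo >> 32) + (alo * bhi >> 32) + (mid >> 32);
  }

  int main(void) {
    uint64_t a = 0xDEADBEEFCAFEBABEull, b = 0xC000000000000001ull;
    uint64_t ref = (uint64_t)(((unsigned __int128)a * b) >> 64);
    printf("%s\n", umulh(a, b) == ref ? "match" : "mismatch"); // match
    return 0;
  }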
287 // On error estimation:
288 // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
289 //                + (2^-HW + 2^-W)
290 // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW
291
292 // Then like for the half-width iterations:
293 // With 0 <= eps1, eps2 < 2^-W
294 // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b
295 // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8)) ]
296 // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ]
297#endif
298
299 // Finally, account for possible overflow, as explained above.
300 x_UQ0 -= 2U;
301
302 // u_n for different precisions (with N-1 half-width iterations):
303 // W0 is the precision of C
304 // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW
305
306 // Estimated with bc:
307 // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; }
308 // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; }
309 // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; }
310 // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; }
311
312 // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1)
313 // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797
314 // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440
315 // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317
316 // u_3 | < 7.31 | | < 7.31 | < 27054456580
317 // u_4 | | | | < 80.4
318 // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920
319
320 // Add 2 to U_N due to final decrement.
321
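The f64 column of the table can be reproduced from the bc definitions above; an equivalent standalone C sketch (assuming the f64 parameters hw = HW = 32 and W0 = 32, with three half1 steps and one full1 step):

  #include <math.h>
  #include <stdio.h>

  static double half1(double un) { return 2.0 * (un + un * un) / pow(2.0, 32) + 1.0; }
  static double full1(double un) {
    return 4.0 * (un + 3.01) / pow(2.0, 32) + 2.0 * (un + 3.01) * (un + 3.01) + 4.0;
  }

  int main(void) {
    double u = (0.75 - 1.0 / sqrt(2.0) + pow(2.0, -32)) * pow(2.0, 32); // u_0
    printf("u_0 = %.1f\n", u);        // < 184224974
    for (int n = 1; n <= 3; ++n) {    // three half-width iterations for f64
      u = half1(u);
      printf("u_%d = %.1f\n", n, u);  // < 15804007, < 116308, < 7.31
    }
    printf("U_N = %.1f\n", full1(u)); // < 218 (before the final "add 2" below)
    return 0;
  }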
322#if defined(SINGLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 2 && NUMBER_OF_FULL_ITERATIONS == 1
323#define RECIPROCAL_PRECISION REP_C(74)
324#elif defined(SINGLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 0 && NUMBER_OF_FULL_ITERATIONS == 3
325#define RECIPROCAL_PRECISION REP_C(10)
326#elif defined(DOUBLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 3 && NUMBER_OF_FULL_ITERATIONS == 1
327#define RECIPROCAL_PRECISION REP_C(220)
328#elif defined(QUAD_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 4 && NUMBER_OF_FULL_ITERATIONS == 1
329#define RECIPROCAL_PRECISION REP_C(13922)
330#else
331#error Invalid number of iterations
332#endif
333
334 // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W
335 x_UQ0 -= RECIPROCAL_PRECISION;
336 // Now 1/b - (2*P) * 2^-W < x < 1/b
337 // FIXME Is x_UQ0 still >= 0.5?
338
339 rep_t quotient_UQ1, dummy;
340 wideMultiply(x_UQ0, aSignificand << 1, &quotient_UQ1, &dummy);
341 // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W).
342
343 // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1),
344 // adjust it to be in [1.0, 2.0) as UQ1.SB.
345 rep_t residualLo;
346 if (quotient_UQ1 < (implicitBit << 1)) {
347 // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB,
348 // effectively doubling its value as well as its error estimation.
349 residualLo = (aSignificand << (significandBits + 1)) - quotient_UQ1 * bSignificand;
350 writtenExponent -= 1;
351 aSignificand <<= 1;
352 } else {
353 // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it
354 // to UQ1.SB by right shifting by 1. Least significant bit is omitted.
355 quotient_UQ1 >>= 1;
356 residualLo = (aSignificand << significandBits) - quotient_UQ1 * bSignificand;
357 }
358 // NB: residualLo is calculated above for the normal result case.
359 // It is re-computed on the denormal path, which is not expected to be
360 // performance-sensitive.
361
362 // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB
363 // Each NextAfter() increments the floating point value by at least 2^-SB
364 // (more, if exponent was incremented).
365 // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint):
366 // q
367 // | | * | | | | |
368 // <---> 2^t
369 // | | | | | * | |
370 // q
371 // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB.
372 // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB
373 // (8*P) * 2^-W < 0.5 * 2^-SB
374 // P < 2^(W-4-SB)
375 // Generally, for at most R NextAfter() to be enough,
376 // P < (2*R - 1) * 2^(W-4-SB)
377 // For f32 (0+3): 10 < 32 (OK)
378 // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required
379 // For f64: 220 < 256 (OK)
380 // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required)
381
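These bounds check out mechanically; a standalone sketch re-deriving the smallest sufficient R from P, W and SB for each format (values taken from the text and the RECIPROCAL_PRECISION definitions above):

  #include <stdio.h>

  int main(void) {
    // Smallest R with P < (2*R - 1) * 2^(W-4-SB) for each configuration.
    struct { const char *name; int W, SB; double P; } fmt[] = {
      {"f32 (0+3)", 32, 23, 10.0},  {"f32 (2+1)", 32, 23, 74.0},
      {"f64 (3+1)", 64, 52, 220.0}, {"f128 (4+1)", 128, 112, 13922.0},
    };
    for (int i = 0; i < 4; ++i) {
      double unit = 1.0;
      for (int k = 0; k < fmt[i].W - 4 - fmt[i].SB; ++k) unit *= 2.0;
      int R = 1;
      while (fmt[i].P >= (2 * R - 1) * unit) ++R;
      printf("%-10s R = %d\n", fmt[i].name, R); // 1, 2, 1, 3
    }
    return 0;
  }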
382 // If we have overflowed the exponent, return infinity
383 if (writtenExponent >= maxExponent)
384 return fromRep(infRep | quotientSign);
385
386 // Now, quotient_UQ1_SB <= the correctly-rounded result
387 // and may need up to 3 NextAfter() steps (see error estimates above).
388 // r = a - b * q
389 rep_t absResult;
390 if (writtenExponent > 0) {
391 // Clear the implicit bit
392 absResult = quotient_UQ1 & significandMask;
393 // Insert the exponent
394 absResult |= (rep_t)writtenExponent << significandBits;
395 residualLo <<= 1;
396 } else {
397 // Prevent shift amount from being negative
398 if (significandBits + writtenExponent < 0)
399 return fromRep(quotientSign);
400
401 absResult = quotient_UQ1 >> (-writtenExponent + 1);
402
403 // multiplied by two to prevent the shift amount from being negative
404 residualLo = (aSignificand << (significandBits + writtenExponent)) - (absResult * bSignificand << 1);
405 }
406
407 // Round
408 residualLo += absResult & 1; // tie to even
409 // The above line conditionally turns the below LT comparison into LTE
410 absResult += residualLo > bSignificand;
411#if defined(QUAD_PRECISION) || (defined(SINGLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS > 0)
412 // Do not round Infinity to NaN
413 absResult += absResult < infRep && residualLo > (2 + 1) * bSignificand;
414#endif
415#if defined(QUAD_PRECISION)
416 absResult += absResult < infRep && residualLo > (4 + 1) * bSignificand;
417#endif
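The LSB trick above is the standard way to get round-to-nearest, ties-to-even out of a single comparison; a standalone integer sketch (div_rne is a hypothetical name):

  #include <stdio.h>

  // Round a/b to nearest, ties to even: increment the truncated quotient
  // when 2*r > b, or when 2*r == b and the quotient is odd.
  static unsigned div_rne(unsigned a, unsigned b) {
    unsigned q = a / b, r2 = 2 * (a % b);
    r2 += q & 1;          // turns the < below into <= exactly when q is odd
    return q + (r2 > b);
  }

  int main(void) {
    // 3.5 -> 4 (up to even), 2.5 -> 2 (down to even), 2.25 -> 2.
    printf("%u %u %u\n", div_rne(7, 2), div_rne(5, 2), div_rne(9, 4));
    return 0;
  }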
418 return fromRep(absResult | quotientSign);
419}