/*
 * Double-precision 2^x function.
 *
 * Copyright (c) 2018, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

#include <math.h>
#include <stdint.h>
#include "libm.h"
#include "exp_data.h"

#define N (1 << EXP_TABLE_BITS)
#define Shift __exp_data.exp2_shift
#define T __exp_data.tab
#define C1 __exp_data.exp2_poly[0]
#define C2 __exp_data.exp2_poly[1]
#define C3 __exp_data.exp2_poly[2]
#define C4 __exp_data.exp2_poly[3]
#define C5 __exp_data.exp2_poly[4]

/* Handle cases that may overflow or underflow when computing the result that
   is scale*(1+TMP) without intermediate rounding. The bit representation of
   scale is in SBITS, however it has a computed exponent that may have
   overflown into the sign bit so that needs to be adjusted before using it as
   a double. (int32_t)KI is the k used in the argument reduction and exponent
   adjustment of scale, positive k here means the result may overflow and
   negative k means the result may underflow. */
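/* Illustrative restatement (editorial note, not from the original comment):
   for k > 0 the biased exponent in SBITS may exceed the largest finite
   exponent by one, so subtracting 1 from the exponent field gives a
   representable scale equal to half the intended 2^(k/N); the final
   2*(scale + scale*tmp) then either rounds to the correct finite result or
   overflows with the right sign and exceptions. For k < 0, adding 1022 to
   the exponent gives scale*0x1p1022, keeping the intermediate computation in
   the normal range, and the multiply by 0x1p-1022 at the end scales the
   result down into (or near) the subnormal range. */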
static inline double specialcase(double_t tmp, uint64_t sbits, uint64_t ki)
{
	double_t scale, y;

	if ((ki & 0x80000000) == 0) {
		/* k > 0, the exponent of scale might have overflowed by 1. */
		sbits -= 1ull << 52;
		scale = asdouble(sbits);
		y = 2 * (scale + scale * tmp);
		return eval_as_double(y);
	}
	/* k < 0, need special care in the subnormal range. */
	sbits += 1022ull << 52;
	scale = asdouble(sbits);
	y = scale + scale * tmp;
	if (y < 1.0) {
		/* Round y to the right precision before scaling it into the subnormal
		   range to avoid double rounding that can cause 0.5+E/2 ulp error where
		   E is the worst-case ulp error outside the subnormal range. So this
		   is only useful if the goal is better than 1 ulp worst-case error. */
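		/* Informal note on the steps below: hi and lo form a two-word
		   representation of 1 + y with the rounding error of y folded back
		   in, so rounding hi + lo and subtracting 1 re-rounds y at ulp
		   2^-52, which is the precision it will effectively have once the
		   0x1p-1022 scaling below moves it into the subnormal range. */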
		double_t hi, lo;
		lo = scale - y + scale * tmp;
		hi = 1.0 + y;
		lo = 1.0 - hi + y + lo;
		y = eval_as_double(hi + lo) - 1.0;
		/* Avoid -0.0 with downward rounding. */
		if (WANT_ROUNDING && y == 0.0)
			y = 0.0;
		/* The underflow exception needs to be signaled explicitly, since
		   the scaling below is exact after the rounding above and would
		   not raise it on its own. */
		fp_force_eval(fp_barrier(0x1p-1022) * 0x1p-1022);
	}
	y = 0x1p-1022 * y;
	return eval_as_double(y);
}

/* Top 12 bits of a double (sign and exponent bits). */
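/* For example, top12(1.0) == 0x3ff, top12(-2.0) == 0xc00 and
   top12(INFINITY) == 0x7ff; masking with 0x7ff below discards the sign bit. */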
static inline uint32_t top12(double x)
{
	return asuint64(x) >> 52;
}

double exp2(double x)
{
	uint32_t abstop;
	uint64_t ki, idx, top, sbits;
	double_t kd, r, r2, scale, tail, tmp;

	abstop = top12(x) & 0x7ff;
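	/* Illustrative values for the check below: top12(0x1p-54) == 0x3c9 and
	   top12(512.0) == 0x408, and the subtraction is unsigned so it wraps
	   when abstop < 0x3c9; the branch is therefore taken for |x| < 0x1p-54,
	   for |x| >= 512 and for NaN inputs. */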
	if (predict_false(abstop - top12(0x1p-54) >= top12(512.0) - top12(0x1p-54))) {
		if (abstop - top12(0x1p-54) >= 0x80000000)
			/* Avoid spurious underflow for tiny x. */
			/* Note: 0 is common input. */
			return WANT_ROUNDING ? 1.0 + x : 1.0;
		if (abstop >= top12(1024.0)) {
			if (asuint64(x) == asuint64(-INFINITY))
				return 0.0;
			if (abstop >= top12(INFINITY))
				return 1.0 + x;
			if (!(asuint64(x) >> 63))
				return __math_oflow(0);
			else if (asuint64(x) >= asuint64(-1075.0))
				return __math_uflow(0);
		}
		if (2 * asuint64(x) > 2 * asuint64(928.0))
			/* Large |x| is special cased below. */
			abstop = 0;
	}

	/* exp2(x) = 2^(k/N) * 2^r, with 2^r in [2^(-1/2N),2^(1/2N)]. */
	/* x = k/N + r, with int k and r in [-1/2N, 1/2N]. */
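	/* Sketch of the rounding trick below, assuming exp2_shift in __exp_data
	   is 0x1.8p52/N: at that magnitude the ulp is exactly 1/N, so x + Shift
	   rounds x to the nearest multiple of 1/N, and the low mantissa bits of
	   asuint64(kd) then hold k = round(x*N) in two's complement. */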
	kd = eval_as_double(x + Shift);
	ki = asuint64(kd); /* k. */
	kd -= Shift; /* k/N for int k. */
	r = x - kd;
	/* 2^(k/N) ~= scale * (1 + tail). */
	idx = 2 * (ki % N);
	top = ki << (52 - EXP_TABLE_BITS);
	tail = asdouble(T[idx]);
	/* This is only a valid scale when -1023*N < k < 1024*N. */
	sbits = T[idx + 1] + top;
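	/* Informal note, assuming the __exp_data table layout: T[idx + 1] holds
	   the scale bits for index i = k % N with i << (52 - EXP_TABLE_BITS)
	   already subtracted, so adding top restores that offset and adds
	   floor(k/N) to the exponent field, giving sbits such that
	   asdouble(sbits) * (1 + tail) ~= 2^(k/N). */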
	/* exp2(x) = 2^(k/N) * 2^r ~= scale + scale * (tail + 2^r - 1). */
	/* Evaluation is optimized assuming superscalar pipelined execution. */
	r2 = r * r;
	/* Without fma the worst case error is 0.5/N ulp larger. */
	/* Worst case error is less than 0.5+0.86/N+(abs poly error * 2^53) ulp. */
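	/* The polynomial below approximates 2^r - 1 on [-1/2N, 1/2N], so C1 is
	   close to ln 2 and tmp ~= tail + 2^r - 1. */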
	tmp = tail + r * C1 + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);
	if (predict_false(abstop == 0))
		return specialcase(tmp, sbits, ki);
	scale = asdouble(sbits);
	/* Note: tmp == 0 or |tmp| > 2^-65 and scale > 2^-928, so there
	   is no spurious underflow here even without fma. */
	return eval_as_double(scale + scale * tmp);
}
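
#if 0
/* Minimal sketch of a standalone check harness, not part of the library
   build; the expected values in the comments follow from the definition of
   exp2, they are not verified outputs of this code. */
#include <stdio.h>

int main(void)
{
	printf("%a\n", exp2(0.0));    /* expected 0x1p+0 */
	printf("%a\n", exp2(10.0));   /* expected 0x1p+10 */
	printf("%a\n", exp2(-1.0));   /* expected 0x1p-1 */
	printf("%a\n", exp2(1100.0)); /* expected inf, with overflow raised */
	return 0;
}
#endif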