/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2020 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/*  MODULE_NAME: utan.c                                              */
/*                                                                   */
/*  FUNCTIONS: utan                                                  */
/*             tanMp                                                 */
/*                                                                   */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h               */
/*                branred.c sincos32.c mptan.c                       */
/*                utan.tbl                                           */
/*                                                                   */
/*  An ultimate tan routine.  Given an IEEE double machine number x  */
/*  it computes the correctly rounded (to nearest) value of tan(x).  */
/*  Assumption: Machine arithmetic operations are performed in       */
/*  round to nearest mode of IEEE 754 standard.                      */
/*                                                                   */
/*********************************************************************/

#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <fenv_private.h>
#include <math-underflow.h>
#include <libm-alias-double.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif

static double tanMp (double);
void __mptan (double, mp_no *, int);
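
/* The computation proceeds in phases of increasing cost and accuracy: a
   fast polynomial or table driven phase, a second phase carried out in
   double-double arithmetic (dla.h), and finally the multiple precision
   fallback tanMp ().  Each phase ends with a Ziv-style rounding test: the
   approximation is perturbed up and down by an error bound (the u*.d
   constants defined in utan.h); if both perturbed values round to the
   same double, that double is the correctly rounded tan (x) and is
   returned.  */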

double
SECTION
__tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
         s, sy, t, t1, t2, t3, t4, w, x2, xn, xx2, y, ya,
         yya, z0, z, zz, z2, zz2;
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

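  /* Do all of the work in round-to-nearest with 53-bit precision (on
     i386 the x87 unit could otherwise evaluate in extended precision);
     the caller's floating-point state is restored on return.  */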
  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
        __set_errno (EDOM);
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
  if (w <= g1.d)
    {
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

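      /* The coefficients d3 ... d11 from utan.h give the approximation
         tan (x) ~ x + x^3 * (d3 + x^2 * (d5 + ...)); the tail t2 beyond
         the leading x is evaluated by Horner's scheme.  */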
      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

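      /* Rounding test: u1.d * x bounds the error of the approximation
         x + t2.  If adding and subtracting the bound yields the same
         double, that double is the correctly rounded result.  */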
      if ((y = x + (t2 - u1.d * x)) == x + (t2 + u1.d * x))
        {
          retval = y;
          goto ret;
        }

      /* Second stage */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

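      /* Repeat the evaluation in double-double arithmetic: the dla.h
         macros (EMULV, MUL2, ADD2) carry a correction term cc* alongside
         each high part c*, giving roughly twice the working precision.  */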
      EMULV (x, x, x2, xx2);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
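      /* Table driven stage: xfg[i] (from utan.tbl) holds a grid point xi
         near w together with fi = tan (xi) (low part ffi) and
         gi = cot (xi).  With pz ~ tan (z) for the remainder z = w - xi,
         the addition formula gives
           tan (xi + z) = fi + pz * (fi + gi) / (gi - pz).  */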
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
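      /* hpinv.d holds 2/pi and toint.d the rounding constant 1.5*2^52
         (see utan.h), so the low word of t contains the integer nearest
         x * 2/pi; xn is that integer as a double and n its parity, i.e.
         the quadrant.  mp1 + mp2 + mp3 split pi/2 into three pieces, so
         a + da approximates x - xn * pi/2 with an extra correction
         term.  */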
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

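          /* When n is set the reduced argument y = a + da lies in an odd
             quadrant, so tan (x) = -cot (y) and a reciprocal is formed;
             otherwise tan (x) = tan (y).  */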
          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
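          /* Algorithm ii repeats the reduction with the finer splitting
             pp3/pp4 of pi/2 (see utan.h), so the reduced argument a + da
             keeps enough correct bits for the double-double stage.  */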
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

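      /* As in case (III), but applied to the reduced argument ya + yya.
         When n is set the complementary formula
           cot (xi + z) = gi - pz * (fi + gi) / (fi + pz)
         is used and the sign -sy applied, since tan (x) = -cot (y).  */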
      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
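  /* __branred (branred.c) reduces huge arguments modulo pi/2 using a
     many-bit representation of 2/pi and returns the multiple of pi/2
     that was removed; its parity selects tan versus -cot, and a + da is
     the reduced argument.  */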
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
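      /* Algorithm iv: redo the reduction in multiple precision with
         __mpranred, then split the reduced argument into a leading
         double a and a correction term da for the double-double
         stage.  */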
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* Multiple precision stage.  Convert x to a multiple precision number,
   compute tan (x) by the __mptan () routine and convert the result back
   to double.  */
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
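  /* p is the number of multiple precision digits carried through
     __mptan and __mp_dbl below.  */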
  p = 32;
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
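  /* SystemTap probe (stap-probe.h): marks that the slow multiple
     precision path was taken, exposing the argument and the result.  */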
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifndef __tan
libm_alias_double (__tan, tan)
#endif