libdecnumber/bid/bid2dpd_dpd2bid.c
/* Copyright (C) 2007-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#undef IN_LIBGCC2
#include "bid-dpd.h"

/* get full 64x64bit product */
#define __mul_64x64_to_128(P, CX, CY) \
{ \
  UINT64 CXH, CXL, CYH, CYL, PL, PH, PM, PM2; \
  CXH = (CX) >> 32; \
  CXL = (UINT32)(CX); \
  CYH = (CY) >> 32; \
  CYL = (UINT32)(CY); \
  \
  PM = CXH*CYL; \
  PH = CXH*CYH; \
  PL = CXL*CYL; \
  PM2 = CXL*CYH; \
  PH += (PM>>32); \
  PM = (UINT64)((UINT32)PM)+PM2+(PL>>32); \
  \
  (P).w[1] = PH + (PM>>32); \
  (P).w[0] = (PM<<32)+(UINT32)PL; \
}

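/* The macro above is the textbook split of each operand into 32-bit
   limbs: with CX = CXH*2^32 + CXL and CY = CYH*2^32 + CYL,

     CX*CY = CXH*CYH*2^64 + (CXH*CYL + CXL*CYH)*2^32 + CXL*CYL.

   PH holds the 2^64 term, PM accumulates the two middle products plus
   the carry out of PL, and the final two statements fold PM into the
   high and low words of the 128-bit result.  */
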
/* add 64-bit value to 128-bit */
#define __add_128_64(R128, A128, B64) \
{ \
  UINT64 R64H; \
  R64H = (A128).w[1]; \
  (R128).w[0] = (B64) + (A128).w[0]; \
  if ((R128).w[0] < (B64)) R64H++; \
  (R128).w[1] = R64H; \
}

/* add 128-bit value to 128-bit (assume no carry-out) */
#define __add_128_128(R128, A128, B128) \
{ \
  UINT128 Q128; \
  Q128.w[1] = (A128).w[1] + (B128).w[1]; \
  Q128.w[0] = (B128).w[0] + (A128).w[0]; \
  if (Q128.w[0] < (B128).w[0]) Q128.w[1]++; \
  (R128).w[1] = Q128.w[1]; \
  (R128).w[0] = Q128.w[0]; \
}

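/* In both add macros the carry out of the low 64-bit word is detected
   with the usual unsigned-wraparound test: after the addition, the low
   result word is smaller than one of its addends exactly when the sum
   wrapped past 2^64, in which case 1 is propagated into the high word.  */
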
#define __mul_128x128_high(Q, A, B) \
{ \
  UINT128 ALBL, ALBH, AHBL, AHBH, QM, QM2; \
  \
  __mul_64x64_to_128(ALBH, (A).w[0], (B).w[1]); \
  __mul_64x64_to_128(AHBL, (B).w[0], (A).w[1]); \
  __mul_64x64_to_128(ALBL, (A).w[0], (B).w[0]); \
  __mul_64x64_to_128(AHBH, (A).w[1], (B).w[1]); \
  \
  __add_128_128(QM, ALBH, AHBL); \
  __add_128_64(QM2, QM, ALBL.w[1]); \
  __add_128_64((Q), AHBH, QM2.w[1]); \
}

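/* __mul_128x128_high returns only the upper 128 bits of the 256-bit
   product A*B.  Bits 0..63 of the product (the low word of ALBL) can
   never influence that upper half, so only ALBL's high word is folded
   into the middle partial sums before the final add into AHBH.  */
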
#include "bid2dpd_dpd2bid.h"

static const unsigned int dm103[] =
  { 0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000 };

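/* dm103[i] is simply 1000*i; _bid_to_dpd64 uses it to peel the leading
   decimal digit b0 off the three-digit group B01 without a second
   multiply.  The b2d* (binary group to DPD declet) and d2b* (DPD declet
   to binary, pre-scaled to its digit position) lookup tables used
   throughout this file are provided by the included headers rather than
   defined here.  */
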
void _bid_to_dpd32 (_Decimal32 *, _Decimal32 *);

void
_bid_to_dpd32 (_Decimal32 *pres, _Decimal32 *px) {
  unsigned int sign, coefficient_x, exp, dcoeff;
  unsigned int b2, b1, b0, b01, res;
  _Decimal32 x = *px;

  sign = (x & 0x80000000);
  if ((x & 0x60000000ul) == 0x60000000ul) {
    /* special encodings */
    if ((x & 0x78000000ul) == 0x78000000ul) {
      *pres = x; /* NaN or Infinity */
      return;
    }
    /* coefficient */
    coefficient_x = (x & 0x001ffffful) | 0x00800000ul;
    if (coefficient_x >= 10000000) coefficient_x = 0;
    /* get exponent */
    exp = (x >> 21) & 0xff;
  } else {
    exp = (x >> 23) & 0xff;
    coefficient_x = (x & 0x007ffffful);
  }
  b01 = coefficient_x / 1000;
  b2 = coefficient_x - 1000 * b01;
  b0 = b01 / 1000;
  b1 = b01 - 1000 * b0;
  dcoeff = b2d[b2] | b2d2[b1];
  if (b0 >= 8) { /* is b0 8 or 9? */
    res = sign | ((0x600 | ((exp >> 6) << 7) |
                  ((b0 & 1) << 6) | (exp & 0x3f)) << 20) | dcoeff;
  } else { /* else b0 is 0..7 */
    res = sign | ((((exp >> 6) << 9) | (b0 << 6) |
                  (exp & 0x3f)) << 20) | dcoeff;
  }
  *pres = res;
}

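/* The two branches above build the two decimal32 DPD layouts.  When the
   leading digit b0 is 8 or 9 (only one bit of information left), the
   combination field starts with 11 followed by the two high exponent
   bits and that one bit; otherwise the two high exponent bits come
   first and b0 is stored in three bits.  In both cases the remaining
   six exponent bits and the two 10-bit declets for b1 and b2 follow,
   giving the 20-bit DPD trailing coefficient.  */
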
void _dpd_to_bid32 (_Decimal32 *, _Decimal32 *);

void
_dpd_to_bid32 (_Decimal32 *pres, _Decimal32 *px) {
  unsigned int r;
  unsigned int sign, exp, bcoeff;
  UINT64 trailing;
  unsigned int d0, d1, d2;
  _Decimal32 x = *px;

  sign = (x & 0x80000000);
  trailing = (x & 0x000fffff);
  if ((x & 0x78000000) == 0x78000000) {
    *pres = x;
    return;
  } else { /* normal number */
    if ((x & 0x60000000) == 0x60000000) { /* G0..G1 = 11 -> d0 = 8 + G4 */
      d0 = d2b3[((x >> 26) & 1) | 8]; /* d0 = (comb & 0x0100 ? 9 : 8); */
      exp = (x >> 27) & 3; /* exp leading bits are G2..G3 */
    } else {
      d0 = d2b3[(x >> 26) & 0x7];
      exp = (x >> 29) & 3; /* exp leading bits are G0..G1 */
    }
    d1 = d2b2[(trailing >> 10) & 0x3ff];
    d2 = d2b[(trailing) & 0x3ff];
    bcoeff = d2 + d1 + d0;
    exp = (exp << 6) + ((x >> 20) & 0x3f);
    if (bcoeff < (1 << 23)) {
      r = exp;
      r <<= 23;
      r |= (bcoeff | sign);
    } else {
      r = exp;
      r <<= 21;
      r |= (sign | 0x60000000ul);
      /* add coeff, without leading bits */
      r |= (((unsigned int) bcoeff) & 0x1fffff);
    }
  }
  *pres = r;
}

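/* A minimal usage sketch of the 32-bit pair (not compiled into the
   library).  It assumes _Decimal32 is the raw 32-bit integer container
   the bit operations above imply, and that 0x32800001 is the BID
   encoding of the value 1; both are assumptions of this example, not
   statements about the build.  */
#if 0
static void
example_roundtrip32 (void)
{
  _Decimal32 bid = 0x32800001;  /* BID: biased exponent 101, coefficient 1 */
  _Decimal32 dpd, back;

  _bid_to_dpd32 (&dpd, &bid);   /* convert BID -> DPD */
  _dpd_to_bid32 (&back, &dpd);  /* convert DPD -> BID */
  /* For a canonical input such as this one, back == bid is expected.  */
}
#endif
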
void _bid_to_dpd64 (_Decimal64 *, _Decimal64 *);

void
_bid_to_dpd64 (_Decimal64 *pres, _Decimal64 *px) {
  UINT64 res;
  UINT64 sign, comb, exp, B34, B01;
  UINT64 d103, D61;
  UINT64 b0, b2, b3, b5;
  unsigned int b1, b4;
  UINT64 bcoeff;
  UINT64 dcoeff;
  unsigned int yhi, ylo;
  _Decimal64 x = *px;

  sign = (x & 0x8000000000000000ull);
  comb = (x & 0x7ffc000000000000ull) >> 51;
  if ((comb & 0xf00) == 0xf00) {
    *pres = x;
    return;
  } else { /* Normal number */
    if ((comb & 0xc00) == 0xc00) { /* G0..G1 = 11 -> exp is G2..G11 */
      exp = (comb) & 0x3ff;
      bcoeff = (x & 0x0007ffffffffffffull) | 0x0020000000000000ull;
    } else {
      exp = (comb >> 2) & 0x3ff;
      bcoeff = (x & 0x001fffffffffffffull);
    }
    D61 = 2305843009ull; /* Floor(2^61 / 10^9) */
    /* Approximate bcoeff / 10^9: multiply the top bits of the binary
       coefficient by floor(2^61 / 10^9) and shift; the estimate can be
       low by one, which the correction below repairs.  */
    yhi = (D61 * (UINT64)(bcoeff >> (UINT64)27)) >> (UINT64)34;
    ylo = bcoeff - 1000000000ull * yhi;
    if (ylo >= 1000000000) {
      ylo = ylo - 1000000000;
      yhi = yhi + 1;
    }
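    /* At this point yhi holds the high (up to seven) decimal digits of
       the coefficient and ylo its low nine digits.  The reciprocal
       multiply truncates, so the estimate is never too large and at
       most one too small, which is why a single conditional correction
       suffices.  */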
    d103 = 0x4189374c;
    B34 = ((UINT64) ylo * d103) >> (32 + 8);
    B01 = ((UINT64) yhi * d103) >> (32 + 8);
    b5 = ylo - B34 * 1000;
    b2 = yhi - B01 * 1000;
    b3 = ((UINT64) B34 * d103) >> (32 + 8);
    b0 = ((UINT64) B01 * d103) >> (32 + 8);
    b4 = (unsigned int) B34 - (unsigned int) b3 * 1000;
    b1 = (unsigned int) B01 - (unsigned int) dm103[b0];
    dcoeff = b2d[b5] | b2d2[b4] | b2d3[b3] | b2d4[b2] | b2d5[b1];
    if (b0 >= 8) /* is b0 8 or 9? */
      res = sign | ((0x1800 | ((exp >> 8) << 9) | ((b0 & 1) << 8) |
                    (exp & 0xff)) << 50) | dcoeff;
    else /* else b0 is 0..7 */
      res = sign | ((((exp >> 8) << 11) | (b0 << 8) |
                    (exp & 0xff)) << 50) | dcoeff;
  }
  *pres = res;
}

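/* In _bid_to_dpd64 the 16-digit coefficient is cut into a leading digit
   b0 and five 3-digit groups b1..b5 (d103 = 0x4189374c is
   ceil(2^40 / 10^3), so a multiply and a 40-bit shift divide by 1000).
   Each group is then turned into a 10-bit declet through the b2d*
   tables, which are pre-shifted to their final bit positions so the
   declets can simply be OR'ed together into the 50-bit DPD trailing
   coefficient.  */
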
void _dpd_to_bid64 (_Decimal64 *, _Decimal64 *);

void
_dpd_to_bid64 (_Decimal64 *pres, _Decimal64 *px) {
  UINT64 res;
  UINT64 sign, comb, exp;
  UINT64 trailing;
  UINT64 d0, d1, d2;
  unsigned int d3, d4, d5;
  UINT64 bcoeff, mask;
  _Decimal64 x = *px;

  sign = (x & 0x8000000000000000ull);
  comb = (x & 0x7ffc000000000000ull) >> 50;
  trailing = (x & 0x0003ffffffffffffull);
  if ((comb & 0x1e00) == 0x1e00) {
    if ((comb & 0x1f00) == 0x1f00) { /* G0..G4 = 11111 -> NaN */
      if (comb & 0x0100) { /* G5 = 1 -> sNaN */
        *pres = x;
      } else { /* G5 = 0 -> qNaN */
        *pres = x;
      }
    } else { /* if ((comb & 0x1e00) == 0x1e00); G0..G4 = 11110 -> INF */
      *pres = x;
    }
    return;
  } else { /* normal number */
    if ((comb & 0x1800) == 0x1800) { /* G0..G1 = 11 -> d0 = 8 + G4 */
      d0 = d2b6[((comb >> 8) & 1) | 8]; /* d0 = (comb & 0x0100 ? 9 : 8); */
      exp = (comb & 0x600) >> 1; /* exp = (comb & 0x0400 ? 1 : 0) * 0x200 +
                                    (comb & 0x0200 ? 1 : 0) * 0x100;
                                    exp leading bits are G2..G3 */
    } else {
      d0 = d2b6[(comb >> 8) & 0x7];
      exp = (comb & 0x1800) >> 3; /* exp = (comb & 0x1000 ? 1 : 0) * 0x200 +
                                     (comb & 0x0800 ? 1 : 0) * 0x100;
                                     exp leading bits are G0..G1 */
    }
    d1 = d2b5[(trailing >> 40) & 0x3ff];
    d2 = d2b4[(trailing >> 30) & 0x3ff];
    d3 = d2b3[(trailing >> 20) & 0x3ff];
    d4 = d2b2[(trailing >> 10) & 0x3ff];
    d5 = d2b[(trailing) & 0x3ff];
    bcoeff = (d5 + d4 + d3) + d2 + d1 + d0;
    exp += (comb & 0xff);
    mask = 1;
    mask <<= 53;
    if (bcoeff < mask) { /* check whether coefficient fits in 10*5+3 bits */
      res = exp;
      res <<= 53;
      res |= (bcoeff | sign);
      *pres = res;
      return;
    }
    /* special format */
    res = (exp << 51) | (sign | 0x6000000000000000ull);
    /* add coeff, without leading bits */
    mask = (mask >> 2) - 1;
    bcoeff &= mask;
    res |= bcoeff;
  }
  *pres = res;
}

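/* The two result formats written by _dpd_to_bid64 mirror the BID64
   encoding: when the decoded coefficient fits in 53 bits the layout is
   sign | 10-bit exponent | 53-bit coefficient; otherwise the marker
   0x6000000000000000 is set, the exponent moves down to bit 51, and
   only the low 51 bits of the coefficient are stored, its leading
   binary bits 100 being implied (the same implicit 0x0020000000000000
   that _bid_to_dpd64 adds back when decoding).  */
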
void _bid_to_dpd128 (_Decimal128 *, _Decimal128 *);

void
_bid_to_dpd128 (_Decimal128 *pres, _Decimal128 *px) {
  UINT128 res;
  UINT128 sign;
  unsigned int comb;
  UINT128 bcoeff;
  UINT128 dcoeff;
  UINT128 BH, d1018, BT2, BT1;
  UINT64 exp, BL, d109;
  UINT64 d106, d103;
  UINT64 k1, k2, k4, k5, k7, k8, k10, k11;
  unsigned int BHH32, BLL32, BHL32, BLH32, k0, k3, k6, k9, amount;
  _Decimal128 x = *px;

  sign.w[1] = (x.w[1] & 0x8000000000000000ull);
  sign.w[0] = 0;
  comb = (x.w[1] /*& 0x7fffc00000000000ull */ ) >> 46;
  exp = 0;
  if ((comb & 0x1e000) == 0x1e000) {
    if ((comb & 0x1f000) == 0x1f000) { /* G0..G4 = 11111 -> NaN */
      if (comb & 0x01000) { /* G5 = 1 -> sNaN */
        res = x;
      } else { /* G5 = 0 -> qNaN */
        res = x;
      }
    } else { /* G0..G4 = 11110 -> INF */
      res = x;
    }
  } else { /* normal number */
    exp = ((x.w[1] & 0x7fff000000000000ull) >> 49) & 0x3fff;
    bcoeff.w[1] = (x.w[1] & 0x0001ffffffffffffull);
    bcoeff.w[0] = x.w[0];
    d1018 = reciprocals10_128[18];
    __mul_128x128_high (BH, bcoeff, d1018);
    amount = recip_scale[18];
    BH.w[0] = (BH.w[0] >> amount) | (BH.w[1] << (64 - amount));
    BL = bcoeff.w[0] - BH.w[0] * 1000000000000000000ull;
    d109 = reciprocals10_64[9];
    __mul_64x64_to_128 (BT1, BH.w[0], d109);
    BHH32 = (unsigned int) (BT1.w[1] >> short_recip_scale[9]);
    BHL32 = (unsigned int) BH.w[0] - BHH32 * 1000000000;
    __mul_64x64_to_128 (BT2, BL, d109);
    BLH32 = (unsigned int) (BT2.w[1] >> short_recip_scale[9]);
    BLL32 = (unsigned int) BL - BLH32 * 1000000000;
    d106 = 0x431BDE83;
    d103 = 0x4189374c;
    k0 = ((UINT64) BHH32 * d106) >> (32 + 18);
    BHH32 -= (unsigned int) k0 * 1000000;
    k1 = ((UINT64) BHH32 * d103) >> (32 + 8);
    k2 = BHH32 - (unsigned int) k1 * 1000;
    k3 = ((UINT64) BHL32 * d106) >> (32 + 18);
    BHL32 -= (unsigned int) k3 * 1000000;
    k4 = ((UINT64) BHL32 * d103) >> (32 + 8);
    k5 = BHL32 - (unsigned int) k4 * 1000;
    k6 = ((UINT64) BLH32 * d106) >> (32 + 18);
    BLH32 -= (unsigned int) k6 * 1000000;
    k7 = ((UINT64) BLH32 * d103) >> (32 + 8);
    k8 = BLH32 - (unsigned int) k7 * 1000;
    k9 = ((UINT64) BLL32 * d106) >> (32 + 18);
    BLL32 -= (unsigned int) k9 * 1000000;
    k10 = ((UINT64) BLL32 * d103) >> (32 + 8);
    k11 = BLL32 - (unsigned int) k10 * 1000;
    dcoeff.w[1] = (b2d[k5] >> 4) | (b2d[k4] << 6) | (b2d[k3] << 16) |
                  (b2d[k2] << 26) | (b2d[k1] << 36);
    dcoeff.w[0] = b2d[k11] | (b2d[k10] << 10) | (b2d[k9] << 20) |
                  (b2d[k8] << 30) | (b2d[k7] << 40) | (b2d[k6] << 50) |
                  (b2d[k5] << 60);
    res.w[0] = dcoeff.w[0];
    if (k0 >= 8) {
      res.w[1] = sign.w[1] | ((0x18000 | ((exp >> 12) << 13) |
                              ((k0 & 1) << 12) | (exp & 0xfff)) << 46) | dcoeff.w[1];
    } else {
      res.w[1] = sign.w[1] | ((((exp >> 12) << 15) | (k0 << 12) |
                              (exp & 0xfff)) << 46) | dcoeff.w[1];
    }
  }
  *pres = res;
}

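/* _bid_to_dpd128 splits the up-to-34-digit coefficient in stages: a
   128x128 reciprocal multiply (reciprocals10_128[18] / recip_scale[18],
   provided by the included headers) divides by 10^18, each half is
   then divided by 10^9 the same way, and the resulting 9-digit pieces
   are cut into 3-digit groups with d106 = ceil(2^50 / 10^6) and
   d103 = ceil(2^40 / 10^3).  The eleven declets and the leading digit
   k0 are finally packed into the 110-bit DPD trailing field, with
   b2d[k5] straddling the boundary between the two result words.  */
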
void _dpd_to_bid128 (_Decimal128 *, _Decimal128 *);

void
_dpd_to_bid128 (_Decimal128 *pres, _Decimal128 *px) {
  UINT128 res;
  UINT128 sign;
  UINT64 exp, comb;
  UINT128 trailing;
  UINT64 d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11;
  UINT128 bcoeff;
  UINT64 tl, th;
  _Decimal128 x = *px;

  sign.w[1] = (x.w[1] & 0x8000000000000000ull);
  sign.w[0] = 0;
  comb = (x.w[1] & 0x7fffc00000000000ull) >> 46;
  trailing.w[1] = x.w[1];
  trailing.w[0] = x.w[0];
  if ((comb & 0x1e000) == 0x1e000) {
    if ((comb & 0x1f000) == 0x1f000) { /* G0..G4 = 11111 -> NaN */
      if (comb & 0x01000) { /* G5 = 1 -> sNaN */
        *pres = x;
      } else { /* G5 = 0 -> qNaN */
        *pres = x;
      }
    } else { /* G0..G4 = 11110 -> INF */
      *pres = x;
    }
    return;
  } else { /* Normal number */
    if ((comb & 0x18000) == 0x18000) { /* G0..G1 = 11 -> d0 = 8 + G4 */
      d0 = d2b6[8 + ((comb & 0x01000) >> 12)];
      exp = (comb & 0x06000) >> 1; /* exp leading bits are G2..G3 */
    } else {
      d0 = d2b6[((comb & 0x07000) >> 12)];
      exp = (comb & 0x18000) >> 3; /* exp leading bits are G0..G1 */
    }
    d11 = d2b[(trailing.w[0]) & 0x3ff];
    d10 = d2b2[(trailing.w[0] >> 10) & 0x3ff];
    d9 = d2b3[(trailing.w[0] >> 20) & 0x3ff];
    d8 = d2b4[(trailing.w[0] >> 30) & 0x3ff];
    d7 = d2b5[(trailing.w[0] >> 40) & 0x3ff];
    d6 = d2b6[(trailing.w[0] >> 50) & 0x3ff];
    d5 = d2b[(trailing.w[0] >> 60) | ((trailing.w[1] & 0x3f) << 4)];
    d4 = d2b2[(trailing.w[1] >> 6) & 0x3ff];
    d3 = d2b3[(trailing.w[1] >> 16) & 0x3ff];
    d2 = d2b4[(trailing.w[1] >> 26) & 0x3ff];
    d1 = d2b5[(trailing.w[1] >> 36) & 0x3ff];
    tl = d11 + d10 + d9 + d8 + d7 + d6;
    th = d5 + d4 + d3 + d2 + d1 + d0;
    __mul_64x64_to_128 (bcoeff, th, 1000000000000000000ull);
    __add_128_64 (bcoeff, bcoeff, tl);
    exp += (comb & 0xfff);
    res.w[0] = bcoeff.w[0];
    res.w[1] = (exp << 49) | sign.w[1] | bcoeff.w[1];
  }
  *pres = res;
}
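
/* The reverse 128-bit path rebuilds the binary coefficient as
   th * 10^18 + tl: the low six declets (through their pre-scaled d2b*
   values) form tl, the high five declets plus the leading digit d0 form
   th, and the 64x64 multiply and 128+64 add macros from the top of the
   file combine them into the full binary coefficient before the
   exponent and sign are placed in the high word.  */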