/*
 * PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
 *
 * Copyright (c) 2014 IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"

#define DECNUMDIGITS 34
#include "libdecnumber/decContext.h"
#include "libdecnumber/decNumber.h"
#include "libdecnumber/dpd/decimal32.h"
#include "libdecnumber/dpd/decimal64.h"
#include "libdecnumber/dpd/decimal128.h"

#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

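/*
 * Working state shared by the helpers below: t64/a64/b64 hold the target
 * and source operands in their DPD register-image form, while t/a/b hold
 * the corresponding unpacked decNumber values.  crbf accumulates the
 * 4-bit CR field result produced by the test and compare helpers.
 */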
struct PPC_DFP {
    CPUPPCState *env;
    uint64_t t64[2], a64[2], b64[2];
    decNumber t, a, b;
    decContext context;
    uint8_t crbf;
};

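/*
 * Map the DFP rounding control (DRN) field, held in the high word of the
 * 64-bit FPSCR as used here, onto the decNumber rounding modes.
 */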
static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr)
{
    enum rounding rnd;

    switch ((fpscr >> 32) & 0x7) {
    case 0:
        rnd = DEC_ROUND_HALF_EVEN;
        break;
    case 1:
        rnd = DEC_ROUND_DOWN;
        break;
    case 2:
        rnd = DEC_ROUND_CEILING;
        break;
    case 3:
        rnd = DEC_ROUND_FLOOR;
        break;
    case 4:
        rnd = DEC_ROUND_HALF_UP;
        break;
    case 5:
        rnd = DEC_ROUND_HALF_DOWN;
        break;
    case 6:
        rnd = DEC_ROUND_UP;
        break;
    case 7:
        rnd = DEC_ROUND_05UP;
        break;
    default:
        g_assert_not_reached();
    }

    decContextSetRounding(context, rnd);
}

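/*
 * Select a rounding mode from the R and RMC immediate fields of the
 * quantize/reround/round-to-integer style instructions.  With R=0,
 * RMC=3 means "use the rounding mode already in the FPSCR", so the
 * context is left untouched in that case.
 */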
static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
                                              struct PPC_DFP *dfp)
{
    enum rounding rnd;
    if (r == 0) {
        switch (rmc & 3) {
        case 0:
            rnd = DEC_ROUND_HALF_EVEN;
            break;
        case 1:
            rnd = DEC_ROUND_DOWN;
            break;
        case 2:
            rnd = DEC_ROUND_HALF_UP;
            break;
        case 3: /* use FPSCR rounding mode */
            return;
        default:
            assert(0); /* cannot get here */
        }
    } else { /* r == 1 */
        switch (rmc & 3) {
        case 0:
            rnd = DEC_ROUND_CEILING;
            break;
        case 1:
            rnd = DEC_ROUND_FLOOR;
            break;
        case 2:
            rnd = DEC_ROUND_UP;
            break;
        case 3:
            rnd = DEC_ROUND_HALF_DOWN;
            break;
        default:
            assert(0); /* cannot get here */
        }
    }
    decContextSetRounding(&dfp->context, rnd);
}

static void dfp_prepare_decimal64(struct PPC_DFP *dfp, uint64_t *a,
                                  uint64_t *b, CPUPPCState *env)
{
    decContextDefault(&dfp->context, DEC_INIT_DECIMAL64);
    dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
    dfp->env = env;

    if (a) {
        dfp->a64[0] = *a;
        decimal64ToNumber((decimal64 *)dfp->a64, &dfp->a);
    } else {
        dfp->a64[0] = 0;
        decNumberZero(&dfp->a);
    }

    if (b) {
        dfp->b64[0] = *b;
        decimal64ToNumber((decimal64 *)dfp->b64, &dfp->b);
    } else {
        dfp->b64[0] = 0;
        decNumberZero(&dfp->b);
    }
}

static void dfp_prepare_decimal128(struct PPC_DFP *dfp, uint64_t *a,
                                   uint64_t *b, CPUPPCState *env)
{
    decContextDefault(&dfp->context, DEC_INIT_DECIMAL128);
    dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
    dfp->env = env;

    if (a) {
        dfp->a64[0] = a[HI_IDX];
        dfp->a64[1] = a[LO_IDX];
        decimal128ToNumber((decimal128 *)dfp->a64, &dfp->a);
    } else {
        dfp->a64[0] = dfp->a64[1] = 0;
        decNumberZero(&dfp->a);
    }

    if (b) {
        dfp->b64[0] = b[HI_IDX];
        dfp->b64[1] = b[LO_IDX];
        decimal128ToNumber((decimal128 *)dfp->b64, &dfp->b);
    } else {
        dfp->b64[0] = dfp->b64[1] = 0;
        decNumberZero(&dfp->b);
    }
}

#define FP_FX (1ull << FPSCR_FX)
#define FP_FEX (1ull << FPSCR_FEX)
#define FP_OX (1ull << FPSCR_OX)
#define FP_OE (1ull << FPSCR_OE)
#define FP_UX (1ull << FPSCR_UX)
#define FP_UE (1ull << FPSCR_UE)
#define FP_XX (1ull << FPSCR_XX)
#define FP_XE (1ull << FPSCR_XE)
#define FP_ZX (1ull << FPSCR_ZX)
#define FP_ZE (1ull << FPSCR_ZE)
#define FP_VX (1ull << FPSCR_VX)
#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
#define FP_VXISI (1ull << FPSCR_VXISI)
#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
#define FP_VXIDI (1ull << FPSCR_VXIDI)
#define FP_VXVC (1ull << FPSCR_VXVC)
#define FP_VXCVI (1ull << FPSCR_VXCVI)
#define FP_VE (1ull << FPSCR_VE)
#define FP_FI (1ull << FPSCR_FI)

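/*
 * Raise the given exception status bits, always setting the FX summary
 * bit, and set the FEX summary when any of the corresponding enable
 * bits passed in 'enabled' are on.
 */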
static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag,
                               uint64_t enabled)
{
    dfp->env->fpscr |= (flag | FP_FX);
    if (dfp->env->fpscr & enabled) {
        dfp->env->fpscr |= FP_FEX;
    }
}
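/*
 * Derive the 5-bit FPRF result-class code from the class of the target
 * value (e.g. 0x11 for a QNaN, 0x09 for -infinity, 0x02 for +zero) and
 * store it in the FPRF field of the FPSCR (bits 16:12 here).
 */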
static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp,
                                               decContext *context)
{
    uint64_t fprf = 0;

    /* construct FPRF */
    switch (decNumberClass(&dfp->t, context)) {
    case DEC_CLASS_SNAN:
        fprf = 0x01;
        break;
    case DEC_CLASS_QNAN:
        fprf = 0x11;
        break;
    case DEC_CLASS_NEG_INF:
        fprf = 0x09;
        break;
    case DEC_CLASS_NEG_NORMAL:
        fprf = 0x08;
        break;
    case DEC_CLASS_NEG_SUBNORMAL:
        fprf = 0x18;
        break;
    case DEC_CLASS_NEG_ZERO:
        fprf = 0x12;
        break;
    case DEC_CLASS_POS_ZERO:
        fprf = 0x02;
        break;
    case DEC_CLASS_POS_SUBNORMAL:
        fprf = 0x14;
        break;
    case DEC_CLASS_POS_NORMAL:
        fprf = 0x04;
        break;
    case DEC_CLASS_POS_INF:
        fprf = 0x05;
        break;
    default:
        assert(0); /* should never get here */
    }
    dfp->env->fpscr &= ~(0x1F << 12);
    dfp->env->fpscr |= (fprf << 12);
}

static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context);
}

static void dfp_check_for_OX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Overflow) {
        dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE);
    }
}

static void dfp_check_for_UX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Underflow) {
        dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE);
    }
}

static void dfp_check_for_XX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Inexact) {
        dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE);
    }
}

static void dfp_check_for_ZX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Division_by_zero) {
        dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE);
    }
}

static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
        }
    }
}

static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp)
{
    if (decNumberIsSNaN(&dfp->t)) {
        dfp->t.bits &= ~DECSNAN;
        dfp->t.bits |= DECNAN;
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
    }
}

static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
            int same = decNumberClass(&dfp->a, &dfp->context) ==
                       decNumberClass(&dfp->b, &dfp->context);
            if ((same && testForSameSign) || (!same && !testForSameSign)) {
                dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE);
            }
        }
    }
}

static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp)
{
    dfp_check_for_VXISI(dfp, 0);
}

static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp)
{
    dfp_check_for_VXISI(dfp, 1);
}

static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if ((decNumberIsInfinite(&dfp->a) && decNumberIsZero(&dfp->b)) ||
            (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE);
        }
    }
}

static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Division_undefined) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE);
    }
}

static void dfp_check_for_VXIDI(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE);
        }
    }
}

static void dfp_check_for_VXVC(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE);
    }
}

static void dfp_check_for_VXCVI(struct PPC_DFP *dfp)
{
    if ((dfp->context.status & DEC_Invalid_operation) &&
        (!decNumberIsSNaN(&dfp->a)) &&
        (!decNumberIsSNaN(&dfp->b))) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
    }
}

static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->t)) {
        dfp->crbf = 1;
    } else if (decNumberIsZero(&dfp->t)) {
        dfp->crbf = 2;
    } else if (decNumberIsNegative(&dfp->t)) {
        dfp->crbf = 8;
    } else {
        dfp->crbf = 4;
    }
}

static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp)
{
    dfp->env->fpscr &= ~(0xF << 12);
    dfp->env->fpscr |= (dfp->crbf << 12);
}

static inline void dfp_makeQNaN(decNumber *dn)
{
    dn->bits &= ~DECSPECIAL;
    dn->bits |= DECNAN;
}

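/*
 * Extract decimal digit 'n' (counting from the least significant digit)
 * out of the decNumber's least-significant-unit array.  This relies on
 * libdecnumber being built with DECDPUN == 3, i.e. three decimal digits
 * packed per lsu element, hence the assert.
 */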
static inline int dfp_get_digit(decNumber *dn, int n)
{
    assert(DECDPUN == 3);
    int unit = n / DECDPUN;
    int dig = n % DECDPUN;
    switch (dig) {
    case 0:
        return dn->lsu[unit] % 10;
    case 1:
        return (dn->lsu[unit] / 10) % 10;
    case 2:
        return dn->lsu[unit] / 100;
    default:
        g_assert_not_reached();
    }
}

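/*
 * Helper-body template for the two-operand arithmetic instructions.  As
 * a rough sketch of the expansion, DFP_HELPER_TAB(dadd, decNumberAdd,
 * ADD_PPs, 64) produces a helper_dadd() that unpacks the operands, calls
 * decNumberAdd(), repacks the decimal64 result into t[0] and then runs
 * the ADD_PPs post-processing (FPRF and exception flags).
 */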
#define DFP_HELPER_TAB(op, dnop, postprocs, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    postprocs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

static void ADD_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_add(dfp);
}

DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64)
DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128)

static void SUB_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_subtract(dfp);
}

DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64)
DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128)

static void MUL_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXIMZ(dfp);
}

DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64)
DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128)

static void DIV_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_ZX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXZDZ(dfp);
    dfp_check_for_VXIDI(dfp);
}

DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64)
DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128)
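/*
 * Template for the compare instructions: like DFP_HELPER_TAB, but the
 * result returned is the 4-bit CR field value computed by the
 * post-processing step (dfp.crbf) rather than a target register image.
 */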
#define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    postprocs(&dfp); \
    return dfp.crbf; \
}

static void CMPU_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64)
DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128)

static void CMPO_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXVC(dfp);
}

DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64)
DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128)
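/*
 * Test Data Class: each bit of the DCM immediate selects one class
 * (zero, subnormal, normal, infinity, QNaN, SNaN); the CR result
 * combines the sign of the operand with whether any selected class
 * matched.
 */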
#define DFP_HELPER_TSTDC(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint32_t dcm) \
{ \
    struct PPC_DFP dfp; \
    int match = 0; \
    \
    dfp_prepare_decimal##size(&dfp, a, 0, env); \
    \
    match |= (dcm & 0x20) && decNumberIsZero(&dfp.a); \
    match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context); \
    match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context); \
    match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a); \
    match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a); \
    match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a); \
    \
    if (decNumberIsNegative(&dfp.a)) { \
        dfp.crbf = match ? 0xA : 0x8; \
    } else { \
        dfp.crbf = match ? 0x2 : 0x0; \
    } \
    \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTDC(dtstdc, 64)
DFP_HELPER_TSTDC(dtstdcq, 128)
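/*
 * Test Data Group: classifies the operand by both value class and
 * exponent.  The minexp/maxexp/nzero_* constants are the format
 * parameters for decimal64 and decimal128 respectively;
 * leftmost_is_nonzero detects an operand whose most significant digit
 * position is in use.
 */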
#define DFP_HELPER_TSTDG(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint32_t dcm) \
{ \
    struct PPC_DFP dfp; \
    int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero, \
        is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero, \
        match; \
    \
    dfp_prepare_decimal##size(&dfp, a, 0, env); \
    \
    if ((size) == 64) { \
        minexp = -398; \
        maxexp = 369; \
        nzero_digits = 16; \
        nzero_idx = 5; \
    } else if ((size) == 128) { \
        minexp = -6176; \
        maxexp = 6111; \
        nzero_digits = 34; \
        nzero_idx = 11; \
    } \
    \
    is_negative = decNumberIsNegative(&dfp.a); \
    is_zero = decNumberIsZero(&dfp.a); \
    is_extreme_exp = (dfp.a.exponent == maxexp) || \
                     (dfp.a.exponent == minexp); \
    is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context); \
    is_normal = decNumberIsNormal(&dfp.a, &dfp.context); \
    leftmost_is_nonzero = (dfp.a.digits == nzero_digits) && \
                          (dfp.a.lsu[nzero_idx] != 0); \
    match = 0; \
    \
    match |= (dcm & 0x20) && is_zero && !is_extreme_exp; \
    match |= (dcm & 0x10) && is_zero && is_extreme_exp; \
    match |= (dcm & 0x08) && \
             (is_subnormal || (is_normal && is_extreme_exp)); \
    match |= (dcm & 0x04) && is_normal && !is_extreme_exp && \
             !leftmost_is_nonzero; \
    match |= (dcm & 0x02) && is_normal && !is_extreme_exp && \
             leftmost_is_nonzero; \
    match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a); \
    \
    if (is_negative) { \
        dfp.crbf = match ? 0xA : 0x8; \
    } else { \
        dfp.crbf = match ? 0x2 : 0x0; \
    } \
    \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTDG(dtstdg, 64)
DFP_HELPER_TSTDG(dtstdgq, 128)
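/*
 * Test Exponent: compares the exponents of the two operands, with
 * special handling when either operand is an infinity or NaN (the
 * atype/btype encoding distinguishes finite, infinite and NaN inputs).
 */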
#define DFP_HELPER_TSTEX(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    int expa, expb, a_is_special, b_is_special; \
    \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    \
    expa = dfp.a.exponent; \
    expb = dfp.b.exponent; \
    a_is_special = decNumberIsSpecial(&dfp.a); \
    b_is_special = decNumberIsSpecial(&dfp.b); \
    \
    if (a_is_special || b_is_special) { \
        int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1; \
        int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1; \
        dfp.crbf = (atype ^ btype) ? 0x1 : 0x2; \
    } else if (expa < expb) { \
        dfp.crbf = 0x8; \
    } else if (expa > expb) { \
        dfp.crbf = 0x4; \
    } else { \
        dfp.crbf = 0x2; \
    } \
    \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTEX(dtstex, 64)
DFP_HELPER_TSTEX(dtstexq, 128)
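/*
 * Test Significance: compares the number of significant digits in the
 * second operand against the 6-bit reference value taken from the low
 * bits of the first operand.
 */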
#define DFP_HELPER_TSTSF(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    unsigned k; \
    \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    \
    k = *a & 0x3F; \
    \
    if (unlikely(decNumberIsSpecial(&dfp.b))) { \
        dfp.crbf = 1; \
    } else if (k == 0) { \
        dfp.crbf = 4; \
    } else if (unlikely(decNumberIsZero(&dfp.b))) { \
        /* Zero has no sig digits */ \
        dfp.crbf = 4; \
    } else { \
        unsigned nsd = dfp.b.digits; \
        if (k < nsd) { \
            dfp.crbf = 8; \
        } else if (k > nsd) { \
            dfp.crbf = 4; \
        } else { \
            dfp.crbf = 2; \
        } \
    } \
    \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTSF(dtstsf, 64)
DFP_HELPER_TSTSF(dtstsfq, 128)
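/*
 * Quantize family: dquai/dqua below, and the reround helpers further
 * down, reuse these pieces.  dfp_quantize() rounds b to the exponent of
 * a via decNumberQuantize() and propagates NaN operands, converting an
 * SNaN input into a QNaN result.
 */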
static void QUA_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXCVI(dfp);
}

static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp)
{
    dfp_set_round_mode_from_immediate(0, rmc, dfp);
    decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context);
    if (decNumberIsSNaN(&dfp->a)) {
        dfp->t = dfp->a;
        dfp_makeQNaN(&dfp->t);
    } else if (decNumberIsSNaN(&dfp->b)) {
        dfp->t = dfp->b;
        dfp_makeQNaN(&dfp->t);
    } else if (decNumberIsQNaN(&dfp->a)) {
        dfp->t = dfp->a;
    } else if (decNumberIsQNaN(&dfp->b)) {
        dfp->t = dfp->b;
    }
}

#define DFP_HELPER_QUAI(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, \
                 uint32_t te, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    \
    decNumberFromUInt32(&dfp.a, 1); \
    dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3); \
    \
    dfp_quantize(rmc, &dfp); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    QUA_PPs(&dfp); \
    \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_QUAI(dquai, 64)
DFP_HELPER_QUAI(dquaiq, 128)

#define DFP_HELPER_QUA(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
                 uint64_t *b, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    \
    dfp_quantize(rmc, &dfp); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    QUA_PPs(&dfp); \
    \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_QUA(dqua, 64)
DFP_HELPER_QUA(dquaq, 128)
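/*
 * Core of the reround operation: round b to 'ref_sig' significant
 * digits by quantizing, with special cases for SNaN inputs, for results
 * whose exponent would exceed 'xmax', and for rounding that carries
 * into the next magnitude.
 */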
static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
                         struct PPC_DFP *dfp)
{
    int msd_orig, msd_rslt;

    if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) {
        dfp->t = dfp->b;
        if (decNumberIsSNaN(&dfp->b)) {
            dfp_makeQNaN(&dfp->t);
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
        }
        return;
    }

    /* Reround is equivalent to quantizing b with 1**E(n) where */
    /* n = exp(b) + numDigits(b) - reference_significance. */

    decNumberFromUInt32(&dfp->a, 1);
    dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig;

    if (unlikely(dfp->a.exponent > xmax)) {
        dfp->t.digits = 0;
        dfp->t.bits &= ~DECNEG;
        dfp_makeQNaN(&dfp->t);
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
        return;
    }

    dfp_quantize(rmc, dfp);

    msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits - 1);
    msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits - 1);

    /* If the quantization resulted in rounding up to the next magnitude, */
    /* then we need to shift the significand and adjust the exponent. */

    if (unlikely((msd_orig == 9) && (msd_rslt == 1))) {

        decNumber negone;

        decNumberFromInt32(&negone, -1);
        decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context);
        dfp->t.exponent++;

        if (unlikely(dfp->t.exponent > xmax)) {
            dfp_makeQNaN(&dfp->t);
            dfp->t.digits = 0;
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
            /* Inhibit XX in this case */
            decContextClearStatus(&dfp->context, DEC_Inexact);
        }
    }
}

#define DFP_HELPER_RRND(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
                 uint64_t *b, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    int32_t ref_sig = *a & 0x3F; \
    int32_t xmax = ((size) == 64) ? 369 : 6111; \
    \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    \
    _dfp_reround(rmc, ref_sig, xmax, &dfp); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    QUA_PPs(&dfp); \
    \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_RRND(drrnd, 64)
DFP_HELPER_RRND(drrndq, 128)
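/*
 * Round to FP Integer helpers: round the operand to an integral value
 * using the R/RMC-selected mode.  The drintx* flavours also report
 * inexactness via XX, while the drintn* flavours only check for SNaN,
 * as reflected in the two post-processing routines below.
 */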
#define DFP_HELPER_RINT(op, postprocs, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, \
                 uint32_t r, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    \
    dfp_set_round_mode_from_immediate(r, rmc, &dfp); \
    decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    postprocs(&dfp); \
    \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

static void RINTX_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_RINT(drintx, RINTX_PPs, 64)
DFP_HELPER_RINT(drintxq, RINTX_PPs, 128)

static void RINTN_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_RINT(drintn, RINTN_PPs, 64)
DFP_HELPER_RINT(drintnq, RINTN_PPs, 128)
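/*
 * Format conversions (DFP Convert To DFP Long / Extended): dctdp widens
 * a decimal32 held in the low 32 bits of the source register to
 * decimal64, and dctqpq widens a decimal64 source to decimal128,
 * signalling VXSNAN (and quieting the NaN) where needed.
 */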
void helper_dctdp(CPUPPCState *env, uint64_t *t, uint64_t *b)
{
    struct PPC_DFP dfp;
    uint32_t b_short = *b;
    dfp_prepare_decimal64(&dfp, 0, 0, env);
    decimal32ToNumber((decimal32 *)&b_short, &dfp.t);
    decimal64FromNumber((decimal64 *)t, &dfp.t, &dfp.context);
    dfp_set_FPRF_from_FRT(&dfp);
}

void helper_dctqpq(CPUPPCState *env, uint64_t *t, uint64_t *b)
{
    struct PPC_DFP dfp;
    dfp_prepare_decimal128(&dfp, 0, 0, env);
    decimal64ToNumber((decimal64 *)b, &dfp.t);

    dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
    dfp_set_FPRF_from_FRT(&dfp);

    decimal128FromNumber((decimal128 *)&dfp.t64, &dfp.t, &dfp.context);
    t[0] = dfp.t64[HI_IDX];
    t[1] = dfp.t64[LO_IDX];
}